Index: head/sys/dev/drm2/drm.h =================================================================== --- head/sys/dev/drm2/drm.h (revision 277486) +++ head/sys/dev/drm2/drm.h (revision 277487) @@ -1,1216 +1,1221 @@ /** * \file drm.h * Header for the Direct Rendering Manager * * \author Rickard E. (Rik) Faith * * \par Acknowledgments: * Dec 1999, Richard Henderson , move to generic \c cmpxchg. */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /** * \mainpage * * The Direct Rendering Manager (DRM) is a device-independent kernel-level * device driver that provides support for the XFree86 Direct Rendering * Infrastructure (DRI). * * The DRM supports the Direct Rendering Infrastructure (DRI) in four major * ways: * -# The DRM provides synchronized access to the graphics hardware via * the use of an optimized two-tiered lock. * -# The DRM enforces the DRI security policy for access to the graphics * hardware by only allowing authenticated X11 clients access to * restricted regions of memory. * -# The DRM provides a generic DMA engine, complete with multiple * queues and the ability to detect the need for an OpenGL context * switch. * -# The DRM is extensible via the use of small device-specific modules * that rely extensively on the API exported by the DRM module. 
* */ #ifndef _DRM_H_ #define _DRM_H_ #ifndef __user #define __user #endif #ifndef __iomem #define __iomem #endif #ifdef __GNUC__ # define DEPRECATED __attribute__ ((deprecated)) #else # define DEPRECATED #endif #if defined(__linux__) #include /* For _IO* macros */ #define DRM_IOCTL_NR(n) _IOC_NR(n) #define DRM_IOC_VOID _IOC_NONE #define DRM_IOC_READ _IOC_READ #define DRM_IOC_WRITE _IOC_WRITE #define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) #include #define DRM_IOCTL_NR(n) ((n) & 0xff) #define DRM_IOC_VOID IOC_VOID #define DRM_IOC_READ IOC_OUT #define DRM_IOC_WRITE IOC_IN #define DRM_IOC_READWRITE IOC_INOUT #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) #endif #ifdef __OpenBSD__ #define DRM_MAJOR 81 #endif #if defined(__linux__) || defined(__NetBSD__) #define DRM_MAJOR 226 #endif #define DRM_MAX_MINOR 15 #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ #define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ #define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ #define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ #define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) #if defined(__linux__) typedef unsigned int drm_handle_t; #else #include typedef unsigned long drm_handle_t; /**< To mapped regions */ #endif typedef unsigned int drm_context_t; /**< GLXContext handle */ typedef unsigned int drm_drawable_t; typedef unsigned int drm_magic_t; /**< Magic for authentication */ /** * Cliprect. * * \warning If you change this structure, make sure you change * XF86DRIClipRectRec in the server as well * * \note KW: Actually it's illegal to change either for * backwards-compatibility reasons. */ struct drm_clip_rect { unsigned short x1; unsigned short y1; unsigned short x2; unsigned short y2; }; /** * Texture region, */ struct drm_tex_region { unsigned char next; unsigned char prev; unsigned char in_use; unsigned char padding; unsigned int age; }; /** * Hardware lock. * * The lock structure is a simple cache-line aligned integer. To avoid * processor bus contention on a multiprocessor system, there should not be any * other data stored in the same cache line. */ struct drm_hw_lock { __volatile__ unsigned int lock; /**< lock variable */ char padding[60]; /**< Pad to cache line */ }; /* This is beyond ugly, and only works on GCC. However, it allows me to use * drm.h in places (i.e., in the X-server) where I can't use size_t. The real * fix is to use uint32_t instead of size_t, but that fix will break existing * LP64 (i.e., PowerPC64, SPARC64, Alpha, etc.) systems. That *will* * eventually happen, though. I chose 'unsigned long' to be the fallback type * because that works on all the platforms I know about. Hopefully, the * real fix will happen before that bites us. */ #ifdef __SIZE_TYPE__ # define DRM_SIZE_T __SIZE_TYPE__ #else # warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!" # define DRM_SIZE_T unsigned long #endif /** * DRM_IOCTL_VERSION ioctl argument type. * * \sa drmGetVersion(). 
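 *
 * Editor's example (a sketch, not part of the original header): user space
 * normally issues this ioctl twice -- once with NULL buffers so the kernel
 * reports name_len/date_len/desc_len, then again after allocating buffers of
 * those sizes.  The device path is an assumption, error checks are omitted,
 * the usual <fcntl.h>/<stdlib.h>/<string.h>/<sys/ioctl.h> headers are
 * assumed, and the returned strings are not NUL-terminated by the kernel.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);	// whichever drm node is in use
 *	struct drm_version v;
 *
 *	memset(&v, 0, sizeof(v));
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);		// first pass: lengths only
 *	v.name = malloc(v.name_len + 1);
 *	v.date = malloc(v.date_len + 1);
 *	v.desc = malloc(v.desc_len + 1);
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);		// second pass: fill the buffers
 *	v.name[v.name_len] = '\0';			// terminate them ourselves
 *	v.date[v.date_len] = '\0';
 *	v.desc[v.desc_len] = '\0';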
*/ struct drm_version { int version_major; /**< Major version */ int version_minor; /**< Minor version */ int version_patchlevel; /**< Patch level */ DRM_SIZE_T name_len; /**< Length of name buffer */ char __user *name; /**< Name of driver */ DRM_SIZE_T date_len; /**< Length of date buffer */ char __user *date; /**< User-space buffer to hold date */ DRM_SIZE_T desc_len; /**< Length of desc buffer */ char __user *desc; /**< User-space buffer to hold desc */ }; /** * DRM_IOCTL_GET_UNIQUE ioctl argument type. * * \sa drmGetBusid() and drmSetBusId(). */ struct drm_unique { DRM_SIZE_T unique_len; /**< Length of unique */ char __user *unique; /**< Unique name for driver instantiation */ }; #undef DRM_SIZE_T struct drm_list { int count; /**< Length of user-space structures */ struct drm_version __user *version; }; struct drm_block { int unused; }; /** * DRM_IOCTL_CONTROL ioctl argument type. * * \sa drmCtlInstHandler() and drmCtlUninstHandler(). */ struct drm_control { enum { DRM_ADD_COMMAND, DRM_RM_COMMAND, DRM_INST_HANDLER, DRM_UNINST_HANDLER } func; int irq; }; /** * Type of memory to map. */ enum drm_map_type { _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ _DRM_REGISTERS = 1, /**< no caching, no core dump */ _DRM_SHM = 2, /**< shared, cached */ _DRM_AGP = 3, /**< AGP/GART */ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ _DRM_GEM = 6 /**< GEM */ }; /** * Memory mapping flags. */ enum drm_map_flags { _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ _DRM_READ_ONLY = 0x02, _DRM_LOCKED = 0x04, /**< shared, cached, locked */ _DRM_KERNEL = 0x08, /**< kernel requires access */ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ _DRM_REMOVABLE = 0x40, /**< Removable mapping */ _DRM_DRIVER = 0x80 /**< Managed by driver */ }; struct drm_ctx_priv_map { unsigned int ctx_id; /**< Context requesting private mapping */ void *handle; /**< Handle of map */ }; /** * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls * argument type. * * \sa drmAddMap(). */ struct drm_map { unsigned long offset; /**< Requested physical address (0 for SAREA)*/ unsigned long size; /**< Requested physical size (bytes) */ enum drm_map_type type; /**< Type of memory to map */ enum drm_map_flags flags; /**< Flags */ void *handle; /**< User-space: "Handle" to pass to mmap() */ /**< Kernel-space: kernel-virtual address */ int mtrr; /**< MTRR slot used */ /* Private data */ }; /** * DRM_IOCTL_GET_CLIENT ioctl argument type. */ struct drm_client { int idx; /**< Which client desired? */ int auth; /**< Is client authenticated? */ unsigned long pid; /**< Process ID */ unsigned long uid; /**< User ID */ unsigned long magic; /**< Magic */ unsigned long iocs; /**< Ioctl count */ }; enum drm_stat_type { _DRM_STAT_LOCK, _DRM_STAT_OPENS, _DRM_STAT_CLOSES, _DRM_STAT_IOCTLS, _DRM_STAT_LOCKS, _DRM_STAT_UNLOCKS, _DRM_STAT_VALUE, /**< Generic value */ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ _DRM_STAT_IRQ, /**< IRQ */ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ _DRM_STAT_DMA, /**< DMA */ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ _DRM_STAT_MISSED /**< Missed DMA opportunity */ /* Add to the *END* of the list */ }; /** * DRM_IOCTL_GET_STATS ioctl argument type. 
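 *
 * Editor's example (a sketch, not part of the original header): the reply is
 * a fixed-size table in which only the first \c count entries of \c data[]
 * are valid, each pairing a counter value with its drm_stat_type.  Assuming
 * an already-open DRM file descriptor \c fd and error handling omitted:
 *
 *	struct drm_stats st;
 *	unsigned long i;
 *
 *	memset(&st, 0, sizeof(st));
 *	if (ioctl(fd, DRM_IOCTL_GET_STATS, &st) == 0)
 *		for (i = 0; i < st.count; i++)
 *			printf("stat %lu: type %d, value %lu\n",
 *			    i, (int)st.data[i].type, st.data[i].value);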
*/ struct drm_stats { unsigned long count; struct { unsigned long value; enum drm_stat_type type; } data[15]; }; /** * Hardware locking flags. */ enum drm_lock_flags { _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ /* These *HALT* flags aren't supported yet -- they will be used to support the full-screen DGA-like mode. */ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ }; /** * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. * * \sa drmGetLock() and drmUnlock(). */ struct drm_lock { int context; enum drm_lock_flags flags; }; /** * DMA flags * * \warning * These values \e must match xf86drm.h. * * \sa drm_dma. */ enum drm_dma_flags { /* Flags for DMA buffer dispatch */ _DRM_DMA_BLOCK = 0x01, /**< * Block until buffer dispatched. * * \note The buffer may not yet have * been processed by the hardware -- * getting a hardware lock with the * hardware quiescent will ensure * that the buffer has been * processed. */ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ /* Flags for DMA buffer request */ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ }; /** * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. * * \sa drmAddBufs(). */ struct drm_buf_desc { int count; /**< Number of buffers of this size */ int size; /**< Size in bytes */ int low_mark; /**< Low water mark */ int high_mark; /**< High water mark */ enum { _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ } flags; unsigned long agp_start; /**< * Start address of where the AGP buffers are * in the AGP aperture */ }; /** * DRM_IOCTL_INFO_BUFS ioctl argument type. */ struct drm_buf_info { int count; /**< Number of buffers described in list */ struct drm_buf_desc __user *list; /**< List of buffer descriptions */ }; /** * DRM_IOCTL_FREE_BUFS ioctl argument type. */ struct drm_buf_free { int count; int __user *list; }; /** * Buffer information * * \sa drm_buf_map. */ struct drm_buf_pub { int idx; /**< Index into the master buffer list */ int total; /**< Buffer size */ int used; /**< Amount of buffer in use (for DMA) */ void __user *address; /**< Address of buffer */ }; /** * DRM_IOCTL_MAP_BUFS ioctl argument type. */ struct drm_buf_map { int count; /**< Length of the buffer list */ #if defined(__cplusplus) void __user *c_virtual; #else void __user *virtual; /**< Mmap'd area in user-virtual */ #endif struct drm_buf_pub __user *list; /**< Buffer information */ }; /** * DRM_IOCTL_DMA ioctl argument type. * * Indices here refer to the offset into the buffer list in drm_buf_get. * * \sa drmDMA(). 
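 *
 * Editor's example (a sketch, not part of the original header): the send_*
 * fields describe buffers being dispatched and the request_* fields ask the
 * kernel for fresh buffers; granted_count and the request arrays are filled
 * in on return.  A minimal request for one buffer, with \c ctx a context
 * handle from DRM_IOCTL_ADD_CTX and error handling omitted, might look like:
 *
 *	int idx, len;
 *	struct drm_dma d;
 *
 *	memset(&d, 0, sizeof(d));
 *	d.context = ctx;
 *	d.request_count = 1;		// ask for a single buffer
 *	d.request_size = 65536;		// preferred size in bytes
 *	d.request_indices = &idx;	// kernel writes the buffer index here
 *	d.request_sizes = &len;		// ...and its actual size here
 *	d.flags = _DRM_DMA_WAIT;	// block until a buffer is free
 *	ioctl(fd, DRM_IOCTL_DMA, &d);	// on success, d.granted_count == 1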
*/ struct drm_dma { int context; /**< Context handle */ int send_count; /**< Number of buffers to send */ int __user *send_indices; /**< List of handles to buffers */ int __user *send_sizes; /**< Lengths of data to send */ enum drm_dma_flags flags; /**< Flags */ int request_count; /**< Number of buffers requested */ int request_size; /**< Desired size for buffers */ int __user *request_indices; /**< Buffer information */ int __user *request_sizes; int granted_count; /**< Number of buffers granted */ }; enum drm_ctx_flags { _DRM_CONTEXT_PRESERVED = 0x01, _DRM_CONTEXT_2DONLY = 0x02 }; /** * DRM_IOCTL_ADD_CTX ioctl argument type. * * \sa drmCreateContext() and drmDestroyContext(). */ struct drm_ctx { drm_context_t handle; enum drm_ctx_flags flags; }; /** * DRM_IOCTL_RES_CTX ioctl argument type. */ struct drm_ctx_res { int count; struct drm_ctx __user *contexts; }; /** * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. */ struct drm_draw { drm_drawable_t handle; }; /** * DRM_IOCTL_UPDATE_DRAW ioctl argument type. */ typedef enum { DRM_DRAWABLE_CLIPRECTS, } drm_drawable_info_type_t; struct drm_update_draw { drm_drawable_t handle; unsigned int type; unsigned int num; unsigned long long data; }; /** * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. */ struct drm_auth { drm_magic_t magic; }; /** * DRM_IOCTL_IRQ_BUSID ioctl argument type. * * \sa drmGetInterruptFromBusID(). */ struct drm_irq_busid { int irq; /**< IRQ number */ int busnum; /**< bus number */ int devnum; /**< device number */ int funcnum; /**< function number */ }; enum drm_vblank_seq_type { _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ }; #define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) struct drm_wait_vblank_request { enum drm_vblank_seq_type type; unsigned int sequence; unsigned long signal; }; struct drm_wait_vblank_reply { enum drm_vblank_seq_type type; unsigned int sequence; long tval_sec; long tval_usec; }; /** * DRM_IOCTL_WAIT_VBLANK ioctl argument type. * * \sa drmWaitVBlank(). */ union drm_wait_vblank { struct drm_wait_vblank_request request; struct drm_wait_vblank_reply reply; }; #define _DRM_PRE_MODESET 1 #define _DRM_POST_MODESET 2 /** * DRM_IOCTL_MODESET_CTL ioctl argument type * * \sa drmModesetCtl(). */ struct drm_modeset_ctl { uint32_t crtc; uint32_t cmd; }; /** * DRM_IOCTL_AGP_ENABLE ioctl argument type. * * \sa drmAgpEnable(). */ struct drm_agp_mode { unsigned long mode; /**< AGP mode */ }; /** * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. * * \sa drmAgpAlloc() and drmAgpFree(). 
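 *
 * Editor's example (a sketch, not part of the original header): a typical
 * sequence -- which requires DRM master privileges -- is acquire, query,
 * enable, allocate, then bind the memory at a page-aligned aperture offset.
 * The size and type values below are placeholders and error handling is
 * omitted.
 *
 *	struct drm_agp_info info;
 *	struct drm_agp_mode mode;
 *	struct drm_agp_buffer buf;
 *	struct drm_agp_binding bind;
 *
 *	ioctl(fd, DRM_IOCTL_AGP_ACQUIRE);
 *	ioctl(fd, DRM_IOCTL_AGP_INFO, &info);
 *	mode.mode = info.mode;			// re-enable with the reported mode
 *	ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode);
 *	memset(&buf, 0, sizeof(buf));
 *	buf.size = 1 << 20;			// rounded up to a page boundary
 *	buf.type = 0;				// plain AGP memory
 *	ioctl(fd, DRM_IOCTL_AGP_ALLOC, &buf);	// fills in buf.handle
 *	bind.handle = buf.handle;
 *	bind.offset = 0;			// byte offset into the aperture
 *	ioctl(fd, DRM_IOCTL_AGP_BIND, &bind);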
*/ struct drm_agp_buffer { unsigned long size; /**< In bytes -- will round to page boundary */ unsigned long handle; /**< Used for binding / unbinding */ unsigned long type; /**< Type of memory to allocate */ unsigned long physical; /**< Physical used by i810 */ }; /** * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. * * \sa drmAgpBind() and drmAgpUnbind(). */ struct drm_agp_binding { unsigned long handle; /**< From drm_agp_buffer */ unsigned long offset; /**< In bytes -- will round to page boundary */ }; /** * DRM_IOCTL_AGP_INFO ioctl argument type. * * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), * drmAgpVendorId() and drmAgpDeviceId(). */ struct drm_agp_info { int agp_version_major; int agp_version_minor; unsigned long mode; unsigned long aperture_base; /**< physical address */ unsigned long aperture_size; /**< bytes */ unsigned long memory_allowed; /**< bytes */ unsigned long memory_used; /** \name PCI information */ /*@{ */ unsigned short id_vendor; unsigned short id_device; /*@} */ }; /** * DRM_IOCTL_SG_ALLOC ioctl argument type. */ struct drm_scatter_gather { unsigned long size; /**< In bytes -- will round to page boundary */ unsigned long handle; /**< Used for mapping / unmapping */ }; /** * DRM_IOCTL_SET_VERSION ioctl argument type. */ struct drm_set_version { int drm_di_major; int drm_di_minor; int drm_dd_major; int drm_dd_minor; }; #define DRM_FENCE_FLAG_EMIT 0x00000001 #define DRM_FENCE_FLAG_SHAREABLE 0x00000002 /** * On hardware with no interrupt events for operation completion, * indicates that the kernel should sleep while waiting for any blocking * operation to complete rather than spinning. * * Has no effect otherwise. */ #define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 #define DRM_FENCE_FLAG_NO_USER 0x00000010 /* Reserved for driver use */ #define DRM_FENCE_MASK_DRIVER 0xFF000000 #define DRM_FENCE_TYPE_EXE 0x00000001 struct drm_fence_arg { unsigned int handle; unsigned int fence_class; unsigned int type; unsigned int flags; unsigned int signaled; unsigned int error; unsigned int sequence; unsigned int pad64; uint64_t expand_pad[2]; /* Future expansion */ }; /* Buffer permissions, referring to how the GPU uses the buffers. * these translate to fence types used for the buffers. * Typically a texture buffer is read, A destination buffer is write and * a command (batch-) buffer is exe. Can be or-ed together. */ #define DRM_BO_FLAG_READ (1ULL << 0) #define DRM_BO_FLAG_WRITE (1ULL << 1) #define DRM_BO_FLAG_EXE (1ULL << 2) /* * All of the bits related to access mode */ #define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE) /* * Status flags. Can be read to determine the actual state of a buffer. * Can also be set in the buffer mask before validation. */ /* * Mask: Never evict this buffer. Not even with force. This type of buffer is only * available to root and must be manually removed before buffer manager shutdown * or lock. * Flags: Acknowledge */ #define DRM_BO_FLAG_NO_EVICT (1ULL << 4) /* * Mask: Require that the buffer is placed in mappable memory when validated. * If not set the buffer may or may not be in mappable memory when validated. * Flags: If set, the buffer is in mappable memory. */ #define DRM_BO_FLAG_MAPPABLE (1ULL << 5) /* Mask: The buffer should be shareable with other processes. * Flags: The buffer is shareable with other processes. 
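 *
 * Editor's note (a sketch, not part of the original header): throughout these
 * definitions "Mask" describes what a bit means when set in the request mask
 * handed to the kernel, while "Flags" describes what it means when reported
 * back in the buffer's actual flags.  For example, a validate request for a
 * shareable, TT-resident, readable and writable buffer could set
 *
 *	req.mask = DRM_BO_FLAG_SHAREABLE | DRM_BO_FLAG_MEM_TT |
 *	    DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
 *
 * in struct drm_bo_info_req, and afterwards test the returned
 * drm_bo_info_rep.flags (e.g. for DRM_BO_FLAG_MEM_TT) to see where the
 * buffer actually ended up.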
*/ #define DRM_BO_FLAG_SHAREABLE (1ULL << 6) /* Mask: If set, place the buffer in cache-coherent memory if available. * If clear, never place the buffer in cache coherent memory if validated. * Flags: The buffer is currently in cache-coherent memory. */ #define DRM_BO_FLAG_CACHED (1ULL << 7) /* Mask: Make sure that every time this buffer is validated, * it ends up on the same location provided that the memory mask is the same. * The buffer will also not be evicted when claiming space for * other buffers. Basically a pinned buffer but it may be thrown out as * part of buffer manager shutdown or locking. * Flags: Acknowledge. */ #define DRM_BO_FLAG_NO_MOVE (1ULL << 8) /* Mask: Make sure the buffer is in cached memory when mapped. In conjunction * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART * with unsnooped PTEs instead of snooped, by using chipset-specific cache * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED, * as the eviction to local memory (TTM unbind) on map is just a side effect * to prevent aggressive cache prefetch from the GPU disturbing the cache * management that the DRM is doing. * * Flags: Acknowledge. * Buffers allocated with this flag should not be used for suballocators * This type may have issues on CPUs with over-aggressive caching * http://marc.info/?l=linux-kernel&m=102376926732464&w=2 */ #define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19) /* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. * Flags: Acknowledge. */ #define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13) /* * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. * Flags: Acknowledge. */ #define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) #define DRM_BO_FLAG_TILE (1ULL << 15) /* * Memory type flags that can be or'ed together in the mask, but only * one appears in flags. */ /* System memory */ #define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24) /* Translation table memory */ #define DRM_BO_FLAG_MEM_TT (1ULL << 25) /* Vram memory */ #define DRM_BO_FLAG_MEM_VRAM (1ULL << 26) /* Up to the driver to define. */ #define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27) #define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28) #define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29) #define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30) #define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31) /* We can add more of these now with a 64-bit flag type */ /* * This is a mask covering all of the memory type flags; easier to just * use a single constant than a bunch of | values. It covers * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4 */ #define DRM_BO_MASK_MEM 0x00000000FF000000ULL /* * This adds all of the CPU-mapping options in with the memory * type to label all bits which change how the page gets mapped */ #define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \ DRM_BO_FLAG_CACHED_MAPPED | \ DRM_BO_FLAG_CACHED | \ DRM_BO_FLAG_MAPPABLE) /* Driver-private flags */ #define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL /* * Don't block on validate and map. Instead, return EBUSY. */ #define DRM_BO_HINT_DONT_BLOCK 0x00000002 /* * Don't place this buffer on the unfenced list. This means * that the buffer will not end up having a fence associated * with it as a result of this operation */ #define DRM_BO_HINT_DONT_FENCE 0x00000004 /** * On hardware with no interrupt events for operation completion, * indicates that the kernel should sleep while waiting for any blocking * operation to complete rather than spinning. * * Has no effect otherwise. 
*/ #define DRM_BO_HINT_WAIT_LAZY 0x00000008 /* * The client has compute relocations refering to this buffer using the * offset in the presumed_offset field. If that offset ends up matching * where this buffer lands, the kernel is free to skip executing those * relocations */ #define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010 #define DRM_BO_INIT_MAGIC 0xfe769812 #define DRM_BO_INIT_MAJOR 1 #define DRM_BO_INIT_MINOR 0 #define DRM_BO_INIT_PATCH 0 struct drm_bo_info_req { uint64_t mask; uint64_t flags; unsigned int handle; unsigned int hint; unsigned int fence_class; unsigned int desired_tile_stride; unsigned int tile_info; unsigned int pad64; uint64_t presumed_offset; }; struct drm_bo_create_req { uint64_t flags; uint64_t size; uint64_t buffer_start; unsigned int hint; unsigned int page_alignment; }; /* * Reply flags */ #define DRM_BO_REP_BUSY 0x00000001 struct drm_bo_info_rep { uint64_t flags; uint64_t proposed_flags; uint64_t size; uint64_t offset; uint64_t arg_handle; uint64_t buffer_start; unsigned int handle; unsigned int fence_flags; unsigned int rep_flags; unsigned int page_alignment; unsigned int desired_tile_stride; unsigned int hw_tile_stride; unsigned int tile_info; unsigned int pad64; uint64_t expand_pad[4]; /*Future expansion */ }; struct drm_bo_arg_rep { struct drm_bo_info_rep bo_info; int ret; unsigned int pad64; }; struct drm_bo_create_arg { union { struct drm_bo_create_req req; struct drm_bo_info_rep rep; } d; }; struct drm_bo_handle_arg { unsigned int handle; }; struct drm_bo_reference_info_arg { union { struct drm_bo_handle_arg req; struct drm_bo_info_rep rep; } d; }; struct drm_bo_map_wait_idle_arg { union { struct drm_bo_info_req req; struct drm_bo_info_rep rep; } d; }; struct drm_bo_op_req { enum { drm_bo_validate, drm_bo_fence, drm_bo_ref_fence, } op; unsigned int arg_handle; struct drm_bo_info_req bo_req; }; struct drm_bo_op_arg { uint64_t next; union { struct drm_bo_op_req req; struct drm_bo_arg_rep rep; } d; int handled; unsigned int pad64; }; #define DRM_BO_MEM_LOCAL 0 #define DRM_BO_MEM_TT 1 #define DRM_BO_MEM_VRAM 2 #define DRM_BO_MEM_PRIV0 3 #define DRM_BO_MEM_PRIV1 4 #define DRM_BO_MEM_PRIV2 5 #define DRM_BO_MEM_PRIV3 6 #define DRM_BO_MEM_PRIV4 7 #define DRM_BO_MEM_TYPES 8 /* For now. */ #define DRM_BO_LOCK_UNLOCK_BM (1 << 0) #define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) struct drm_bo_version_arg { uint32_t major; uint32_t minor; uint32_t patchlevel; }; struct drm_mm_type_arg { unsigned int mem_type; unsigned int lock_flags; }; struct drm_mm_init_arg { unsigned int magic; unsigned int major; unsigned int minor; unsigned int mem_type; uint64_t p_offset; uint64_t p_size; }; struct drm_mm_info_arg { unsigned int mem_type; uint64_t p_size; }; struct drm_gem_close { /** Handle of the object to be closed. 
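 *
 * Editor's example (a sketch, not part of the original header): releasing a
 * handle obtained earlier, e.g. from DRM_IOCTL_GEM_OPEN, with error handling
 * omitted:
 *
 *	struct drm_gem_close cl;
 *
 *	memset(&cl, 0, sizeof(cl));
 *	cl.handle = handle;
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &cl);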
*/ uint32_t handle; uint32_t pad; }; struct drm_gem_flink { /** Handle for the object being named */ uint32_t handle; /** Returned global name */ uint32_t name; }; struct drm_gem_open { /** Name of object being opened */ uint32_t name; /** Returned handle for the object */ uint32_t handle; /** Returned size of the object */ uint64_t size; }; struct drm_get_cap { uint64_t capability; uint64_t value; }; struct drm_event { uint32_t type; uint32_t length; }; #define DRM_EVENT_VBLANK 0x01 #define DRM_EVENT_FLIP_COMPLETE 0x02 struct drm_event_vblank { struct drm_event base; uint64_t user_data; uint32_t tv_sec; uint32_t tv_usec; uint32_t sequence; uint32_t reserved; }; #define DRM_CAP_DUMB_BUFFER 0x1 #define DRM_CAP_VBLANK_HIGH_CRTC 0x2 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4 #define DRM_CAP_PRIME 0x5 #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 +#define DRM_PRIME_CAP_IMPORT 0x1 +#define DRM_PRIME_CAP_EXPORT 0x2 + #include "drm_mode.h" /** * \name Ioctls Definitions */ /*@{*/ #define DRM_IOCTL_BASE 'd' #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) #define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) #define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) #define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) #define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) #define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) #define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) #define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) #define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) #define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) #define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) #define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) #define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) #define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) #define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) #define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) #define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) #define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) #define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) #define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) #define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) #define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) #define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) #define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) #define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) #define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) #define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) #define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) #define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) #define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) #define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) #define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) #define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) 
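/*
 * Editor's note (not part of the original header): all of these requests use
 * ioctl group DRM_IOCTL_BASE ('d'), with DRM_IO/DRM_IOR/DRM_IOW/DRM_IOWR
 * selecting the transfer direction and DRM_IOCTL_NR() recovering the request
 * number on dispatch.  Device-specific drivers construct their private
 * ioctls the same way, offset by DRM_COMMAND_BASE (defined further down),
 * for instance (the names and the 0x03 offset are hypothetical):
 *
 *	#define MYDRV_GETPARAM		0x03
 *	#define DRM_IOCTL_MYDRV_GETPARAM \
 *		DRM_IOWR(DRM_COMMAND_BASE + MYDRV_GETPARAM, struct drm_mydrv_getparam)
 */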
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) #define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) #define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) #define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) #define DRM_IOCTL_GEM_PRIME_OPEN DRM_IOWR(0x2e, struct drm_gem_open) #define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) #define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) #define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) #define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) #define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) #define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) #define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) #define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) #define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather) #define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) #define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) #define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) #define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc) #define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc) #define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor) #define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut) #define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) #define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) #define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) #define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) #define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) #define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) #define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) #define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob) #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) #define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) #define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) #define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb) #define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb) #define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb) #define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res) #define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane) #define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane) #define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2) +#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties) +#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) #define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg) #define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg) #define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg) #define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg) #define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg) #define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg) #define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, 
struct drm_fence_arg) #define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg) #define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg) #define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg) #define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg) #define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg) #define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) #define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg) #define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) #define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) #define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg) #define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg) #define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) #define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) #define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg) #define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg) /*@}*/ /** * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x99. * Generic IOCTLS restart at 0xA0. * * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and * drmCommandReadWrite(). */ #define DRM_COMMAND_BASE 0x40 #define DRM_COMMAND_END 0xA0 /* typedef area */ #ifndef __KERNEL__ typedef struct drm_clip_rect drm_clip_rect_t; typedef struct drm_tex_region drm_tex_region_t; typedef struct drm_hw_lock drm_hw_lock_t; typedef struct drm_version drm_version_t; typedef struct drm_unique drm_unique_t; typedef struct drm_list drm_list_t; typedef struct drm_block drm_block_t; typedef struct drm_control drm_control_t; typedef enum drm_map_type drm_map_type_t; typedef enum drm_map_flags drm_map_flags_t; typedef struct drm_ctx_priv_map drm_ctx_priv_map_t; typedef struct drm_map drm_map_t; typedef struct drm_client drm_client_t; typedef enum drm_stat_type drm_stat_type_t; typedef struct drm_stats drm_stats_t; typedef enum drm_lock_flags drm_lock_flags_t; typedef struct drm_lock drm_lock_t; typedef enum drm_dma_flags drm_dma_flags_t; typedef struct drm_buf_desc drm_buf_desc_t; typedef struct drm_buf_info drm_buf_info_t; typedef struct drm_buf_free drm_buf_free_t; typedef struct drm_buf_pub drm_buf_pub_t; typedef struct drm_buf_map drm_buf_map_t; typedef struct drm_dma drm_dma_t; typedef union drm_wait_vblank drm_wait_vblank_t; typedef struct drm_agp_mode drm_agp_mode_t; typedef enum drm_ctx_flags drm_ctx_flags_t; typedef struct drm_ctx drm_ctx_t; typedef struct drm_ctx_res drm_ctx_res_t; typedef struct drm_draw drm_draw_t; typedef struct drm_update_draw drm_update_draw_t; typedef struct drm_auth drm_auth_t; typedef struct drm_irq_busid drm_irq_busid_t; typedef enum drm_vblank_seq_type drm_vblank_seq_type_t; typedef struct drm_agp_buffer drm_agp_buffer_t; typedef struct drm_agp_binding drm_agp_binding_t; typedef struct drm_agp_info drm_agp_info_t; typedef struct drm_scatter_gather drm_scatter_gather_t; typedef struct drm_set_version drm_set_version_t; typedef struct drm_fence_arg drm_fence_arg_t; typedef struct drm_mm_type_arg drm_mm_type_arg_t; typedef struct drm_mm_init_arg drm_mm_init_arg_t; typedef enum drm_bo_type drm_bo_type_t; #endif #endif Index: head/sys/dev/drm2/drmP.h =================================================================== --- head/sys/dev/drm2/drmP.h (revision 277486) +++ head/sys/dev/drm2/drmP.h (revision 277487) @@ -1,1518 +1,1519 
@@ /** * \file drmP.h * Private header for Direct Rendering Manager * * \author Rickard E. (Rik) Faith * \author Gareth Hughes */ /* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright (c) 2009-2010, Code Aurora Forum. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #ifndef _DRM_P_H_ #define _DRM_P_H_ #if defined(_KERNEL) || defined(__KERNEL__) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct drm_file; struct drm_device; #include #include #include #include "opt_compat.h" #include "opt_drm.h" #include "opt_syscons.h" #ifdef DRM_DEBUG #undef DRM_DEBUG #define DRM_DEBUG_DEFAULT_ON 1 #endif /* DRM_DEBUG */ #define DRM_DEBUGBITS_DEBUG 0x1 #define DRM_DEBUGBITS_KMS 0x2 #define DRM_DEBUGBITS_FAILED_IOCTL 0x4 #undef DRM_LINUX #define DRM_LINUX 0 /***********************************************************************/ /** \name DRM template customization defaults */ /*@{*/ /* driver capabilities and requirements mask */ #define DRIVER_USE_AGP 0x1 #define DRIVER_REQUIRE_AGP 0x2 #define DRIVER_USE_MTRR 0x4 #define DRIVER_PCI_DMA 0x8 #define DRIVER_SG 0x10 #define DRIVER_HAVE_DMA 0x20 #define DRIVER_HAVE_IRQ 0x40 #define DRIVER_IRQ_SHARED 0x80 #define DRIVER_IRQ_VBL 0x100 #define DRIVER_DMA_QUEUE 0x200 #define DRIVER_FB_DMA 0x400 #define DRIVER_IRQ_VBL2 0x800 #define DRIVER_GEM 0x1000 #define DRIVER_MODESET 0x2000 #define DRIVER_PRIME 0x4000 #define DRIVER_LOCKLESS_IRQ 0x8000 #define DRM_HASH_SIZE 16 /* Size of key hash table */ #define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */ #define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */ #define DRM_GEM_MAPPING_MASK (3ULL << 62) #define DRM_GEM_MAPPING_KEY (2ULL << 62) /* Non-canonical address form */ #define DRM_GEM_MAX_IDX 0x3fffff #define DRM_GEM_MAPPING_IDX(o) (((o) >> 40) & DRM_GEM_MAX_IDX) #define DRM_GEM_MAPPING_OFF(i) 
(((uint64_t)(i)) << 40) #define DRM_GEM_MAPPING_MAPOFF(o) \ ((o) & ~(DRM_GEM_MAPPING_OFF(DRM_GEM_MAX_IDX) | DRM_GEM_MAPPING_KEY)) MALLOC_DECLARE(DRM_MEM_DMA); MALLOC_DECLARE(DRM_MEM_SAREA); MALLOC_DECLARE(DRM_MEM_DRIVER); MALLOC_DECLARE(DRM_MEM_MAGIC); MALLOC_DECLARE(DRM_MEM_IOCTLS); MALLOC_DECLARE(DRM_MEM_MAPS); MALLOC_DECLARE(DRM_MEM_BUFS); MALLOC_DECLARE(DRM_MEM_SEGS); MALLOC_DECLARE(DRM_MEM_PAGES); MALLOC_DECLARE(DRM_MEM_FILES); MALLOC_DECLARE(DRM_MEM_QUEUES); MALLOC_DECLARE(DRM_MEM_CMDS); MALLOC_DECLARE(DRM_MEM_MAPPINGS); MALLOC_DECLARE(DRM_MEM_BUFLISTS); MALLOC_DECLARE(DRM_MEM_AGPLISTS); MALLOC_DECLARE(DRM_MEM_CTXBITMAP); MALLOC_DECLARE(DRM_MEM_SGLISTS); MALLOC_DECLARE(DRM_MEM_DRAWABLE); MALLOC_DECLARE(DRM_MEM_MM); MALLOC_DECLARE(DRM_MEM_HASHTAB); MALLOC_DECLARE(DRM_MEM_KMS); SYSCTL_DECL(_hw_drm); #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) /***********************************************************************/ /** \name Internal types and structures */ /*@{*/ #define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) #define DRM_MIN(a,b) ((a)<(b)?(a):(b)) #define DRM_MAX(a,b) ((a)>(b)?(a):(b)) #define DRM_IF_VERSION(maj, min) (maj << 16 | min) #define __OS_HAS_AGP 1 #define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP) #define DRM_DEV_UID 0 #define DRM_DEV_GID 0 #define wait_queue_head_t atomic_t #define DRM_WAKEUP(w) wakeup((void *)w) #define DRM_WAKEUP_INT(w) wakeup(w) #define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0) #define DRM_CURPROC curthread #define DRM_STRUCTPROC struct thread #define DRM_SPINTYPE struct mtx #define DRM_SPININIT(l,name) mtx_init(l, name, NULL, MTX_DEF) #define DRM_SPINUNINIT(l) mtx_destroy(l) #define DRM_SPINLOCK(l) mtx_lock(l) #define DRM_SPINUNLOCK(u) mtx_unlock(u) #define DRM_SPINLOCK_IRQSAVE(l, irqflags) do { \ mtx_lock(l); \ (void)irqflags; \ } while (0) #define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u) #define DRM_SPINLOCK_ASSERT(l) mtx_assert(l, MA_OWNED) #define DRM_CURRENTPID curthread->td_proc->p_pid #define DRM_LOCK(dev) sx_xlock(&(dev)->dev_struct_lock) #define DRM_UNLOCK(dev) sx_xunlock(&(dev)->dev_struct_lock) #define DRM_LOCK_SLEEP(dev, chan, flags, msg, timeout) \ (sx_sleep((chan), &(dev)->dev_struct_lock, (flags), (msg), (timeout))) #if defined(INVARIANTS) #define DRM_LOCK_ASSERT(dev) sx_assert(&(dev)->dev_struct_lock, SA_XLOCKED) #define DRM_UNLOCK_ASSERT(dev) sx_assert(&(dev)->dev_struct_lock, SA_UNLOCKED) #else #define DRM_LOCK_ASSERT(d) #define DRM_UNLOCK_ASSERT(d) #endif #define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS) #define DRM_IRQ_ARGS void *arg typedef void irqreturn_t; #define IRQ_HANDLED /* nothing */ #define IRQ_NONE /* nothing */ enum { DRM_IS_NOT_AGP, DRM_IS_AGP, DRM_MIGHT_BE_AGP }; #define DRM_AGP_MEM struct agp_memory_info #define drm_get_device_from_kdev(_kdev) (_kdev->si_drv1) #define PAGE_ALIGN(addr) round_page(addr) /* DRM_SUSER returns true if the user is superuser */ #define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0) #define DRM_AGP_FIND_DEVICE() agp_find_device() #define DRM_MTRR_WC MDF_WRITECOMBINE #define jiffies ticks #define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz) #define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000) #define time_after(a,b) ((long)(b) - (long)(a) < 0) #define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0) #define drm_msleep(x, msg) pause((msg), ((int64_t)(x)) * hz / 1000) /* DRM_READMEMORYBARRIER() prevents reordering of reads. * DRM_WRITEMEMORYBARRIER() prevents reordering of writes. * DRM_MEMORYBARRIER() prevents reordering of reads and writes. 
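 *
 * Editor's example (a sketch, not part of the original header): a common use
 * is to order a descriptor update ahead of the store that tells the device to
 * look at it, e.g. with the DRM_WRITE32() accessors defined below:
 *
 *	DRM_WRITE32(map, RING_TAIL, tail);	// publish the new tail
 *	DRM_WRITEMEMORYBARRIER();		// make sure it is visible first
 *	DRM_WRITE32(map, RING_KICK, 1);		// then poke the hardware
 *
 * RING_TAIL and RING_KICK are hypothetical register offsets.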
*/ #define DRM_READMEMORYBARRIER() rmb() #define DRM_WRITEMEMORYBARRIER() wmb() #define DRM_MEMORYBARRIER() mb() #define DRM_READ8(map, offset) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) #define DRM_READ16(map, offset) \ le16toh(*(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset))) #define DRM_READ32(map, offset) \ le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset))) #define DRM_READ64(map, offset) \ le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset))) #define DRM_WRITE8(map, offset, val) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = val #define DRM_WRITE16(map, offset, val) \ *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = htole16(val) #define DRM_WRITE32(map, offset, val) \ *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = htole32(val) #define DRM_WRITE64(map, offset, val) \ *(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = htole64(val) #define DRM_VERIFYAREA_READ( uaddr, size ) \ (!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ)) #define DRM_COPY_TO_USER(user, kern, size) \ copyout(kern, user, size) #define DRM_COPY_FROM_USER(kern, user, size) \ copyin(user, kern, size) #define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ copyin(arg2, arg1, arg3) #define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \ copyout(arg2, arg1, arg3) #define DRM_GET_USER_UNCHECKED(val, uaddr) \ ((val) = fuword32(uaddr), 0) #define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ (_map) = (_dev)->context_sareas[_ctx]; \ } while(0) #define LOCK_TEST_WITH_RETURN(dev, file_priv) \ do { \ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \ dev->lock.file_priv != file_priv) { \ DRM_ERROR("%s called without lock held\n", \ __FUNCTION__); \ return EINVAL; \ } \ } while (0) /* Returns -errno to shared code */ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ for ( ret = 0 ; !ret && !(condition) ; ) { \ DRM_UNLOCK(dev); \ mtx_lock(&dev->irq_lock); \ if (!(condition)) \ ret = -mtx_sleep(&(queue), &dev->irq_lock, \ PCATCH, "drmwtq", (timeout)); \ mtx_unlock(&dev->irq_lock); \ DRM_LOCK(dev); \ } #define DRM_ERROR(fmt, ...) \ printf("error: [" DRM_NAME ":pid%d:%s] *ERROR* " fmt, \ DRM_CURRENTPID, __func__ , ##__VA_ARGS__) #define DRM_INFO(fmt, ...) printf("info: [" DRM_NAME "] " fmt , ##__VA_ARGS__) #define DRM_DEBUG(fmt, ...) do { \ if ((drm_debug_flag & DRM_DEBUGBITS_DEBUG) != 0) \ printf("[" DRM_NAME ":pid%d:%s] " fmt, DRM_CURRENTPID, \ __func__ , ##__VA_ARGS__); \ } while (0) #define DRM_DEBUG_KMS(fmt, ...) do { \ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) \ printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\ __func__ , ##__VA_ARGS__); \ } while (0) #define DRM_DEBUG_DRIVER(fmt, ...) do { \ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) \ printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\ __func__ , ##__VA_ARGS__); \ } while (0) #define dev_err(dev, fmt, ...) \ device_printf((dev), "error: " fmt, ## __VA_ARGS__) #define dev_warn(dev, fmt, ...) \ device_printf((dev), "warning: " fmt, ## __VA_ARGS__) #define dev_info(dev, fmt, ...) \ device_printf((dev), "info: " fmt, ## __VA_ARGS__) #define dev_dbg(dev, fmt, ...) 
do { \ if ((drm_debug_flag& DRM_DEBUGBITS_KMS) != 0) { \ device_printf((dev), "debug: " fmt, ## __VA_ARGS__); \ } \ } while (0) typedef struct drm_pci_id_list { int vendor; int device; long driver_private; char *name; } drm_pci_id_list_t; struct drm_msi_blacklist_entry { int vendor; int device; }; #define DRM_AUTH 0x1 #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 #define DRM_CONTROL_ALLOW 0x8 #define DRM_UNLOCKED 0x10 typedef struct drm_ioctl_desc { unsigned long cmd; int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv); int flags; unsigned int cmd_drv; } drm_ioctl_desc_t; /** * Creates a driver or general drm_ioctl_desc array entry for the given * ioctl, for use by drm_ioctl(). */ #define DRM_IOCTL_DEF(ioctl, func, flags) \ [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} typedef struct drm_magic_entry { drm_magic_t magic; struct drm_file *priv; struct drm_magic_entry *next; } drm_magic_entry_t; typedef struct drm_magic_head { struct drm_magic_entry *head; struct drm_magic_entry *tail; } drm_magic_head_t; typedef struct drm_buf { int idx; /**< Index into master buflist */ int total; /**< Buffer size */ int order; /**< log-base-2(total) */ int used; /**< Amount of buffer in use (for DMA) */ unsigned long offset; /**< Byte offset (used internally) */ void *address; /**< Address of buffer */ unsigned long bus_address; /**< Bus address of buffer */ struct drm_buf *next; /**< Kernel-only: used for free list */ __volatile__ int waiting; /**< On kernel DMA queue */ __volatile__ int pending; /**< On hardware DMA queue */ struct drm_file *file_priv; /**< Private of holding file descr */ int context; /**< Kernel queue for this buffer */ int while_locked; /**< Dispatch this buffer while locked */ enum { DRM_LIST_NONE = 0, DRM_LIST_FREE = 1, DRM_LIST_WAIT = 2, DRM_LIST_PEND = 3, DRM_LIST_PRIO = 4, DRM_LIST_RECLAIM = 5 } list; /**< Which list we're on */ int dev_priv_size; /**< Size of buffer private storage */ void *dev_private; /**< Per-buffer private storage */ } drm_buf_t; typedef struct drm_freelist { int initialized; /* Freelist in use */ atomic_t count; /* Number of free buffers */ drm_buf_t *next; /* End pointer */ int low_mark; /* Low water mark */ int high_mark; /* High water mark */ } drm_freelist_t; typedef struct drm_dma_handle { void *vaddr; bus_addr_t busaddr; bus_dma_tag_t tag; bus_dmamap_t map; } drm_dma_handle_t; typedef struct drm_buf_entry { int buf_size; int buf_count; drm_buf_t *buflist; int seg_count; drm_dma_handle_t **seglist; int page_order; drm_freelist_t freelist; } drm_buf_entry_t; /* Event queued up for userspace to read */ struct drm_pending_event { struct drm_event *event; struct list_head link; struct drm_file *file_priv; pid_t pid; /* pid of requester, no guarantee it's valid by the time we deliver the event, for tracing only */ void (*destroy)(struct drm_pending_event *event); }; /* initial implementaton using a linked list - todo hashtab */ struct drm_prime_file_private { struct list_head head; #ifdef DUMBBELL_WIP struct mutex lock; #endif /* DUMBBELL_WIP */ }; typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t; struct drm_file { TAILQ_ENTRY(drm_file) link; struct drm_device *dev; int authenticated; int master; pid_t pid; uid_t uid; drm_magic_t magic; unsigned long ioctl_count; void *driver_priv; struct drm_gem_names object_names; int is_master; struct drm_master *masterp; 
struct list_head fbs; struct list_head event_list; int event_space; struct selinfo event_poll; struct drm_prime_file_private prime; }; typedef struct drm_lock_data { struct drm_hw_lock *hw_lock; /* Hardware lock */ struct drm_file *file_priv; /* Unique identifier of holding process (NULL is kernel)*/ int lock_queue; /* Queue of blocked processes */ unsigned long lock_time; /* Time of last lock in jiffies */ } drm_lock_data_t; /* This structure, in the struct drm_device, is always initialized while the * device * is open. dev->dma_lock protects the incrementing of dev->buf_use, which * when set marks that no further bufs may be allocated until device teardown * occurs (when the last open of the device has closed). The high/low * watermarks of bufs are only touched by the X Server, and thus not * concurrently accessed, so no locking is needed. */ typedef struct drm_device_dma { struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ int buf_count; /**< total number of buffers */ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ int seg_count; int page_count; /**< number of pages */ unsigned long *pagelist; /**< page list */ unsigned long byte_count; enum { _DRM_DMA_USE_AGP = 0x01, _DRM_DMA_USE_SG = 0x02, _DRM_DMA_USE_FB = 0x04, _DRM_DMA_USE_PCI_RO = 0x08 } flags; } drm_device_dma_t; typedef struct drm_agp_mem { void *handle; unsigned long bound; /* address */ int pages; struct drm_agp_mem *prev; struct drm_agp_mem *next; } drm_agp_mem_t; typedef struct drm_agp_head { device_t agpdev; struct agp_info info; const char *chipset; drm_agp_mem_t *memory; unsigned long mode; int enabled; int acquired; unsigned long base; int mtrr; int cant_use_aperture; unsigned long page_mask; } drm_agp_head_t; typedef struct drm_sg_mem { vm_offset_t vaddr; vm_paddr_t *busaddr; vm_pindex_t pages; } drm_sg_mem_t; #define DRM_MAP_HANDLE_BITS (sizeof(void *) == 4 ? 
4 : 24) #define DRM_MAP_HANDLE_SHIFT (sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS) typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t; typedef struct drm_local_map { unsigned long offset; /* Physical address (0 for SAREA) */ unsigned long size; /* Physical size (bytes) */ enum drm_map_type type; /* Type of memory mapped */ enum drm_map_flags flags; /* Flags */ void *handle; /* User-space: "Handle" to pass to mmap */ /* Kernel-space: kernel-virtual address */ int mtrr; /* Boolean: MTRR used */ /* Private data */ int rid; /* PCI resource ID for bus_space */ void *virtual; /* Kernel-space: kernel-virtual address */ struct resource *bsr; bus_space_tag_t bst; bus_space_handle_t bsh; drm_dma_handle_t *dmah; TAILQ_ENTRY(drm_local_map) link; } drm_local_map_t; struct drm_vblank_info { wait_queue_head_t queue; /* vblank wait queue */ atomic_t count; /* number of VBLANK interrupts */ /* (driver must alloc the right number of counters) */ atomic_t refcount; /* number of users of vblank interrupts */ u32 last; /* protected by dev->vbl_lock, used */ /* for wraparound handling */ int enabled; /* so we don't call enable more than */ /* once per disable */ int inmodeset; /* Display driver is setting mode */ }; /* location of GART table */ #define DRM_ATI_GART_MAIN 1 #define DRM_ATI_GART_FB 2 #define DRM_ATI_GART_PCI 1 #define DRM_ATI_GART_PCIE 2 #define DRM_ATI_GART_IGP 3 struct drm_ati_pcigart_info { int gart_table_location; int gart_reg_if; void *addr; dma_addr_t bus_addr; dma_addr_t table_mask; dma_addr_t member_mask; struct drm_dma_handle *table_handle; drm_local_map_t mapping; int table_size; struct drm_dma_handle *dmah; /* handle for ATI PCIGART table */ }; typedef vm_paddr_t resource_size_t; /** * GEM specific mm private for tracking GEM objects */ struct drm_gem_mm { struct drm_open_hash offset_hash; /**< User token hash table for maps */ struct unrhdr *idxunr; }; struct drm_gem_object { /** Reference count of this object */ u_int refcount; /** Handle count of this object. Each handle also holds a reference */ u_int handle_count; /* number of handles on this object */ /** Related drm device */ struct drm_device *dev; /** File representing the shmem storage: filp in Linux parlance */ vm_object_t vm_obj; bool on_map; struct drm_hash_item map_list; /** * Size of the object, in bytes. Immutable over the object's * lifetime. */ size_t size; /** * Global name for this object, starts at 1. 0 means unnamed. * Access is covered by the object_name_lock in the related drm_device */ int name; /** * Memory domains. These monitor which caches contain read/write data * related to the object. When transitioning from one set of domains * to another, the driver is called to ensure that caches are suitably * flushed and invalidated */ uint32_t read_domains; uint32_t write_domain; /** * While validating an exec operation, the * new read/write domain values are computed here. 
* They will be transferred to the above values * at the point that any cache flushing occurs */ uint32_t pending_read_domains; uint32_t pending_write_domain; void *driver_private; #ifdef DUMBBELL_WIP /* dma buf exported from this GEM object */ struct dma_buf *export_dma_buf; /* dma buf attachment backing this object */ struct dma_buf_attachment *import_attach; #endif /* DUMBBELL_WIP */ }; #include "drm_crtc.h" /* per-master structure */ struct drm_master { u_int refcount; /* refcount for this master */ struct list_head head; /**< each minor contains a list of masters */ struct drm_minor *minor; /**< link back to minor we are a master for */ char *unique; /**< Unique identifier: e.g., busid */ int unique_len; /**< Length of unique field */ int unique_size; /**< amount allocated */ int blocked; /**< Blocked due to VC switch? */ /** \name Authentication */ /*@{ */ struct drm_open_hash magiclist; struct list_head magicfree; /*@} */ struct drm_lock_data lock; /**< Information on hardware lock */ void *driver_priv; /**< Private structure for driver to use */ }; /* Size of ringbuffer for vblank timestamps. Just double-buffer * in initial implementation. */ #define DRM_VBLANKTIME_RBSIZE 2 /* Flags and return codes for get_vblank_timestamp() driver function. */ #define DRM_CALLED_FROM_VBLIRQ 1 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) #define DRM_VBLANKTIME_INVBL (1 << 1) /* get_scanout_position() return flags */ #define DRM_SCANOUTPOS_VALID (1 << 0) #define DRM_SCANOUTPOS_INVBL (1 << 1) #define DRM_SCANOUTPOS_ACCURATE (1 << 2) #ifndef DMA_BIT_MASK #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1) #endif #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) struct drm_driver_info { int (*load)(struct drm_device *, unsigned long flags); int (*use_msi)(struct drm_device *, unsigned long flags); int (*firstopen)(struct drm_device *); int (*open)(struct drm_device *, struct drm_file *); void (*preclose)(struct drm_device *, struct drm_file *file_priv); void (*postclose)(struct drm_device *, struct drm_file *); void (*lastclose)(struct drm_device *); int (*unload)(struct drm_device *); void (*reclaim_buffers_locked)(struct drm_device *, struct drm_file *file_priv); int (*dma_ioctl)(struct drm_device *dev, void *data, struct drm_file *file_priv); void (*dma_ready)(struct drm_device *); int (*dma_quiescent)(struct drm_device *); int (*dma_flush_block_and_flush)(struct drm_device *, int context, enum drm_lock_flags flags); int (*dma_flush_unblock)(struct drm_device *, int context, enum drm_lock_flags flags); int (*context_ctor)(struct drm_device *dev, int context); int (*context_dtor)(struct drm_device *dev, int context); int (*kernel_context_switch)(struct drm_device *dev, int old, int new); int (*kernel_context_switch_unlock)(struct drm_device *dev); void (*irq_preinstall)(struct drm_device *dev); int (*irq_postinstall)(struct drm_device *dev); void (*irq_uninstall)(struct drm_device *dev); void (*irq_handler)(DRM_IRQ_ARGS); u32 (*get_vblank_counter)(struct drm_device *dev, int crtc); int (*enable_vblank)(struct drm_device *dev, int crtc); void (*disable_vblank)(struct drm_device *dev, int crtc); int (*get_scanout_position)(struct drm_device *dev, int crtc, int *vpos, int *hpos); int (*get_vblank_timestamp)(struct drm_device *dev, int crtc, int *max_error, struct timeval *vblank_time, unsigned flags); int (*gem_init_object)(struct drm_gem_object *obj); void (*gem_free_object)(struct drm_gem_object *obj); int (*gem_open_object)(struct drm_gem_object *, struct drm_file *); void 
(*gem_close_object)(struct drm_gem_object *, struct drm_file *); struct cdev_pager_ops *gem_pager_ops; int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle); int (*sysctl_init)(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top); void (*sysctl_cleanup)(struct drm_device *dev); drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */ /** * Called by \c drm_device_is_agp. Typically used to determine if a * card is really attached to AGP or not. * * \param dev DRM device handle * * \returns * One of three values is returned depending on whether or not the * card is absolutely \b not AGP (return of 0), absolutely \b is AGP * (return of 1), or may or may not be AGP (return of 2). */ int (*device_is_agp) (struct drm_device * dev); drm_ioctl_desc_t *ioctls; #ifdef COMPAT_FREEBSD32 drm_ioctl_desc_t *compat_ioctls; int *compat_ioctls_nr; #endif int max_ioctl; int buf_priv_size; int major; int minor; int patchlevel; const char *name; /* Simple driver name */ const char *desc; /* Longer driver name */ const char *date; /* Date of last major changes. */ u32 driver_features; }; /** * DRM minor structure. This structure represents a drm minor number. */ struct drm_minor { int index; /**< Minor device number */ int type; /**< Control or render */ device_t kdev; /**< OS device */ struct drm_device *dev; struct drm_master *master; /* currently active master for this node */ struct list_head master_list; struct drm_mode_group mode_group; }; /* mode specified on the command line */ struct drm_cmdline_mode { bool specified; bool refresh_specified; bool bpp_specified; int xres, yres; int bpp; int refresh; bool rb; bool interlace; bool cvt; bool margins; enum drm_connector_force force; }; struct drm_pending_vblank_event { struct drm_pending_event base; int pipe; struct drm_event_vblank event; }; /* Length for the array of resource pointers for drm_get_resource_*. */ #define DRM_MAX_PCI_RESOURCE 6 /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. */ struct drm_device { struct drm_driver_info *driver; drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */ uint16_t pci_device; /* PCI device id */ uint16_t pci_vendor; /* PCI vendor id */ uint16_t pci_subdevice; /* PCI subsystem device id */ uint16_t pci_subvendor; /* PCI subsystem vendor id */ char *unique; /* Unique identifier: e.g., busid */ int unique_len; /* Length of unique field */ device_t device; /* Device instance from newbus */ struct cdev *devnode; /* Device number for mknod */ int if_version; /* Highest interface version set */ int flags; /* Flags to open(2) */ /* Locks */ struct mtx dma_lock; /* protects dev->dma */ struct mtx irq_lock; /* protects irq condition checks */ struct mtx dev_lock; /* protects everything else */ struct sx dev_struct_lock; DRM_SPINTYPE drw_lock; /* Usage Counters */ int open_count; /* Outstanding files open */ int buf_use; /* Buffers in use -- cannot alloc */ /* Performance counters */ unsigned long counters; enum drm_stat_type types[15]; atomic_t counts[15]; /* Authentication */ drm_file_list_t files; drm_magic_head_t magiclist[DRM_HASH_SIZE]; /* Linked list of mappable regions. 
Protected by dev_lock */ drm_map_list_t maplist; struct unrhdr *map_unrhdr; drm_local_map_t **context_sareas; int max_context; drm_lock_data_t lock; /* Information on hardware lock */ /* DMA queues (contexts) */ drm_device_dma_t *dma; /* Optional pointer for DMA support */ /* Context support */ int irq; /* Interrupt used by board */ int irq_enabled; /* True if the irq handler is enabled */ int msi_enabled; /* MSI enabled */ int irqrid; /* Interrupt used by board */ struct resource *irqr; /* Resource for interrupt used by board */ void *irqh; /* Handle from bus_setup_intr */ /* Storage of resource pointers for drm_get_resource_* */ struct resource *pcir[DRM_MAX_PCI_RESOURCE]; int pcirid[DRM_MAX_PCI_RESOURCE]; int pci_domain; int pci_bus; int pci_slot; int pci_func; atomic_t context_flag; /* Context swapping flag */ int last_context; /* Last current context */ int num_crtcs; struct sigio *buf_sigio; /* Processes waiting for SIGIO */ /* Sysctl support */ struct drm_sysctl_info *sysctl; int sysctl_node_idx; drm_agp_head_t *agp; drm_sg_mem_t *sg; /* Scatter gather memory */ unsigned long *ctx_bitmap; void *dev_private; unsigned int agp_buffer_token; drm_local_map_t *agp_buffer_map; struct drm_minor *control; /**< Control node for card */ struct drm_minor *primary; /**< render type primary screen head */ void *drm_ttm_bdev; struct unrhdr *drw_unrhdr; /* RB tree of drawable infos */ RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head; int vblank_disable_allowed; atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */ struct mtx vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ struct mtx vbl_lock; atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */ u32 *last_vblank; /* protected by dev->vbl_lock, used */ /* for wraparound handling */ int *vblank_enabled; /* so we don't call enable more than once per disable */ int *vblank_inmodeset; /* Display driver is setting mode */ u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */ struct callout vblank_disable_callout; u32 max_vblank_count; /**< size of vblank counter register */ struct list_head vblank_event_list; struct mtx event_lock; struct drm_mode_config mode_config; /**< Current mode config */ /* GEM part */ struct sx object_name_lock; struct drm_gem_names object_names; void *mm_private; void *sysctl_private; char busid_str[128]; int modesetting; int switch_power_state; }; #define DRM_SWITCH_POWER_ON 0 #define DRM_SWITCH_POWER_OFF 1 #define DRM_SWITCH_POWER_CHANGING 2 static __inline__ int drm_core_check_feature(struct drm_device *dev, int feature) { return ((dev->driver->driver_features & feature) ? 
1 : 0); } #if __OS_HAS_AGP static inline int drm_core_has_AGP(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_USE_AGP); } #else #define drm_core_has_AGP(dev) (0) #endif enum dmi_field { DMI_NONE, DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_BIOS_DATE, DMI_SYS_VENDOR, DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_PRODUCT_SERIAL, DMI_PRODUCT_UUID, DMI_BOARD_VENDOR, DMI_BOARD_NAME, DMI_BOARD_VERSION, DMI_BOARD_SERIAL, DMI_BOARD_ASSET_TAG, DMI_CHASSIS_VENDOR, DMI_CHASSIS_TYPE, DMI_CHASSIS_VERSION, DMI_CHASSIS_SERIAL, DMI_CHASSIS_ASSET_TAG, DMI_STRING_MAX, }; struct dmi_strmatch { unsigned char slot; char substr[79]; }; struct dmi_system_id { int (*callback)(const struct dmi_system_id *); const char *ident; struct dmi_strmatch matches[4]; }; #define DMI_MATCH(a, b) {(a), (b)} bool dmi_check_system(const struct dmi_system_id *); extern int drm_debug_flag; extern int drm_notyet_flag; extern unsigned int drm_vblank_offdelay; extern unsigned int drm_timestamp_precision; extern unsigned int drm_timestamp_monotonic; /* Device setup support (drm_drv.c) */ int drm_probe(device_t kdev, drm_pci_id_list_t *idlist); int drm_attach(device_t kdev, drm_pci_id_list_t *idlist); int drm_create_cdevs(device_t kdev); void drm_close(void *data); int drm_detach(device_t kdev); d_ioctl_t drm_ioctl; d_open_t drm_open; d_read_t drm_read; d_poll_t drm_poll; d_mmap_t drm_mmap; extern drm_local_map_t *drm_getsarea(struct drm_device *dev); void drm_event_wakeup(struct drm_pending_event *e); int drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top); /* File operations helpers (drm_fops.c) */ extern int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p, struct drm_device *dev); #ifdef DUMBBELL_WIP extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); #ifdef DUMBBELL_WIP /* * See drm_prime.c * -- dumbbell@ */ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, vm_page_t *pages, dma_addr_t *addrs, int max_pages); #endif /* DUMBBELL_WIP */ extern struct sg_table *drm_prime_pages_to_sg(vm_page_t *pages, int nr_pages); extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle); int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj); int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf, struct drm_gem_object **obj); #endif /* DUMBBELL_WIP */ /* Memory management support (drm_memory.c) */ void drm_mem_init(void); void drm_mem_uninit(void); void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map); void *drm_ioremap(struct drm_device *dev, 
drm_local_map_t *map); void drm_ioremapfree(drm_local_map_t *map); int drm_mtrr_add(unsigned long offset, size_t size, int flags); int drm_mtrr_del(int handle, unsigned long offset, size_t size, int flags); int drm_context_switch(struct drm_device *dev, int old, int new); int drm_context_switch_complete(struct drm_device *dev, int new); int drm_ctxbitmap_init(struct drm_device *dev); void drm_ctxbitmap_cleanup(struct drm_device *dev); void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); int drm_ctxbitmap_next(struct drm_device *dev); /* Locking IOCTL support (drm_lock.c) */ int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context); int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); /* Buffer management support (drm_bufs.c) */ unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource); void drm_rmmap(struct drm_device *dev, drm_local_map_t *map); int drm_order(unsigned long size); int drm_addmap(struct drm_device *dev, unsigned long offset, unsigned long size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr); int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request); int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request); int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request); /* DMA support (drm_dma.c) */ int drm_dma_setup(struct drm_device *dev); void drm_dma_takedown(struct drm_device *dev); void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf); void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv); #define drm_core_reclaim_buffers drm_reclaim_buffers /* IRQ support (drm_irq.c) */ int drm_irq_install(struct drm_device *dev); int drm_irq_uninstall(struct drm_device *dev); irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); void drm_driver_irq_preinstall(struct drm_device *dev); void drm_driver_irq_postinstall(struct drm_device *dev); void drm_driver_irq_uninstall(struct drm_device *dev); void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); void drm_vblank_post_modeset(struct drm_device *dev, int crtc); int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern u32 drm_vblank_count(struct drm_device *dev, int crtc); extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, struct timeval *vblanktime); extern bool drm_handle_vblank(struct drm_device *dev, int crtc); void drm_handle_vblank_events(struct drm_device *dev, int crtc); extern int drm_vblank_get(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc); extern void drm_vblank_off(struct drm_device *dev, int crtc); extern void drm_vblank_cleanup(struct drm_device *dev); extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, struct timeval *tvblank, unsigned flags); extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, int *max_error, struct timeval *vblank_time, unsigned flags, struct drm_crtc *refcrtc); extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); struct timeval ns_to_timeval(const int64_t nsec); int64_t 
timeval_to_ns(const struct timeval *tv); /* AGP/PCI Express/GART support (drm_agpsupport.c) */ int drm_device_is_agp(struct drm_device *dev); int drm_device_is_pcie(struct drm_device *dev); drm_agp_head_t *drm_agp_init(void); int drm_agp_acquire(struct drm_device *dev); int drm_agp_release(struct drm_device *dev); int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info); int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); void *drm_agp_allocate_memory(size_t pages, u32 type); int drm_agp_free_memory(void *handle); int drm_agp_bind_memory(void *handle, off_t start); int drm_agp_unbind_memory(void *handle); int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); /* Scatter Gather Support (drm_scatter.c) */ void drm_sg_cleanup(drm_sg_mem_t *entry); int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); /* sysctl support (drm_sysctl.h) */ extern int drm_sysctl_init(struct drm_device *dev); extern int drm_sysctl_cleanup(struct drm_device *dev); /* ATI PCIGART support (ati_pcigart.c) */ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); /* Cache management (drm_memory.c) */ void drm_clflush_pages(vm_page_t *pages, unsigned long num_pages); +void drm_clflush_virt_range(char *addr, unsigned long length); /* Locking IOCTL support (drm_drv.c) */ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Misc. 
IOCTL support (drm_ioctl.c) */ int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_setunique(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getclient(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Context IOCTL support (drm_context.c) */ int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_switchctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_setsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Drawable IOCTL support (drm_drawable.c) */ int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_update_draw(struct drm_device *dev, void *data, struct drm_file *file_priv); struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, int handle); /* Drawable support (drm_drawable.c) */ void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.c) */ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Buffer management support (drm_bufs.c) */ int drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); /* DMA support (drm_dma.c) */ int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv); /* IRQ support (drm_irq.c) */ int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv); /* AGP/GART support (drm_agpsupport.c) */ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_release_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_enable_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_free_ioctl(struct drm_device *dev, void *data, struct drm_file 
*file_priv); int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Stub support (drm_stub.h) */ extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Scatter Gather Support (drm_scatter.c) */ int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv); /* consistent PCI memory functions (drm_pci.c) */ drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align, dma_addr_t maxaddr); void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* Graphics Execution Manager library functions (drm_gem.c) */ int drm_gem_init(struct drm_device *dev); void drm_gem_destroy(struct drm_device *dev); int drm_gem_close_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_gem_flink_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_gem_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); int drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle); void drm_gem_object_handle_reference(struct drm_gem_object *obj); void drm_gem_object_handle_unreference(struct drm_gem_object *obj); void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj); void drm_gem_object_handle_free(struct drm_gem_object *obj); void drm_gem_object_reference(struct drm_gem_object *obj); void drm_gem_object_unreference(struct drm_gem_object *obj); void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj); void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct drm_gem_object *obj); int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); int drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, size_t size); struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle); void drm_gem_open(struct drm_device *dev, struct drm_file *file_priv); void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv); int drm_gem_create_mmap_offset(struct drm_gem_object *obj); void drm_gem_free_mmap_offset(struct drm_gem_object *obj); int drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot); void drm_gem_pager_dtr(void *obj); struct ttm_bo_device; int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot); struct ttm_buffer_object; void ttm_bo_release_mmap(struct ttm_buffer_object *bo); void drm_device_lock_mtx(struct drm_device *dev); void drm_device_unlock_mtx(struct drm_device *dev); int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags, const char *msg, int timeout); void drm_device_assert_mtx_locked(struct drm_device *dev); void drm_device_assert_mtx_unlocked(struct drm_device *dev); void drm_device_lock_struct(struct drm_device *dev); void drm_device_unlock_struct(struct drm_device *dev); int drm_device_sleep_struct(struct drm_device *dev, void *chan, int 
flags, const char *msg, int timeout); void drm_device_assert_struct_locked(struct drm_device *dev); void drm_device_assert_struct_unlocked(struct drm_device *dev); void drm_compat_locking_init(struct drm_device *dev); void drm_sleep_locking_init(struct drm_device *dev); /* drm_modes.c */ bool drm_mode_parse_command_line_for_connector(const char *mode_option, struct drm_connector *connector, struct drm_cmdline_mode *mode); struct drm_display_mode *drm_mode_create_from_cmdline_mode( struct drm_device *dev, struct drm_cmdline_mode *cmd); /* drm_edid.c */ u8 *drm_find_cea_extension(struct edid *edid); /* Inline replacements for drm_alloc and friends */ static __inline__ void * drm_alloc(size_t size, struct malloc_type *area) { return malloc(size, area, M_NOWAIT); } static __inline__ void * drm_calloc(size_t nmemb, size_t size, struct malloc_type *area) { return malloc(size * nmemb, area, M_NOWAIT | M_ZERO); } static __inline__ void * drm_realloc(void *oldpt, size_t oldsize, size_t size, struct malloc_type *area) { return reallocf(oldpt, size, area, M_NOWAIT); } static __inline__ void drm_free(void *pt, size_t size, struct malloc_type *area) { free(pt, area); } /* Inline replacements for DRM_IOREMAP macros */ static __inline__ void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) { map->virtual = drm_ioremap_wc(dev, map); } static __inline__ void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) { map->virtual = drm_ioremap(dev, map); } static __inline__ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev) { if ( map->virtual && map->size ) drm_ioremapfree(map); } static __inline__ struct drm_local_map * drm_core_findmap(struct drm_device *dev, unsigned long offset) { drm_local_map_t *map; DRM_LOCK_ASSERT(dev); TAILQ_FOREACH(map, &dev->maplist, link) { if (offset == (unsigned long)map->handle) return map; } return NULL; } static __inline__ void drm_core_dropmap(struct drm_map *map) { } #define DRM_PCIE_SPEED_25 1 #define DRM_PCIE_SPEED_50 2 #define DRM_PCIE_SPEED_80 4 extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); #define drm_can_sleep() (DRM_HZ & 1) #endif /* __KERNEL__ */ #endif /* _DRM_P_H_ */ Index: head/sys/dev/drm2/drm_crtc.c =================================================================== --- head/sys/dev/drm2/drm_crtc.c (revision 277486) +++ head/sys/dev/drm2/drm_crtc.c (revision 277487) @@ -1,3419 +1,3771 @@ /* * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie * Copyright (c) 2008 Red Hat Inc. * * DRM core CRTC related functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. 
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. * * Authors: * Keith Packard * Eric Anholt * Dave Airlie * Jesse Barnes */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include /* Avoid boilerplate. I'm tired of typing. */ #define DRM_ENUM_NAME_FN(fnname, list) \ char *fnname(int val) \ { \ int i; \ for (i = 0; i < DRM_ARRAY_SIZE(list); i++) { \ if (list[i].type == val) \ return list[i].name; \ } \ return "(unknown)"; \ } /* * Global properties */ static struct drm_prop_enum_list drm_dpms_enum_list[] = { { DRM_MODE_DPMS_ON, "On" }, { DRM_MODE_DPMS_STANDBY, "Standby" }, { DRM_MODE_DPMS_SUSPEND, "Suspend" }, { DRM_MODE_DPMS_OFF, "Off" } }; DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) /* * Optional properties */ static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { { DRM_MODE_SCALE_NONE, "None" }, { DRM_MODE_SCALE_FULLSCREEN, "Full" }, { DRM_MODE_SCALE_CENTER, "Center" }, { DRM_MODE_SCALE_ASPECT, "Full aspect" }, }; static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = { { DRM_MODE_DITHERING_OFF, "Off" }, { DRM_MODE_DITHERING_ON, "On" }, { DRM_MODE_DITHERING_AUTO, "Automatic" }, }; /* * Non-global properties, but "required" for certain connectors. */ static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ }; DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ }; DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, drm_dvi_i_subconnector_enum_list) static struct drm_prop_enum_list drm_tv_select_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ }; DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ }; DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { { DRM_MODE_DIRTY_OFF, "Off" }, { DRM_MODE_DIRTY_ON, "On" }, { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, }; DRM_ENUM_NAME_FN(drm_get_dirty_info_name, drm_dirty_info_enum_list) struct drm_conn_prop_enum_list { int type; char *name; int count; }; /* * Connector and encoder 
types. */ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, { DRM_MODE_CONNECTOR_Component, "Component", 0 }, { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, { DRM_MODE_CONNECTOR_TV, "TV", 0 }, { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, }; static struct drm_prop_enum_list drm_encoder_enum_list[] = { { DRM_MODE_ENCODER_NONE, "None" }, { DRM_MODE_ENCODER_DAC, "DAC" }, { DRM_MODE_ENCODER_TMDS, "TMDS" }, { DRM_MODE_ENCODER_LVDS, "LVDS" }, { DRM_MODE_ENCODER_TVDAC, "TV" }, }; static void drm_property_destroy_blob(struct drm_device *dev, struct drm_property_blob *blob); char *drm_get_encoder_name(struct drm_encoder *encoder) { static char buf[32]; snprintf(buf, 32, "%s-%d", drm_encoder_enum_list[encoder->encoder_type].name, encoder->base.id); return buf; } char *drm_get_connector_name(struct drm_connector *connector) { static char buf[32]; snprintf(buf, 32, "%s-%d", drm_connector_enum_list[connector->connector_type].name, connector->connector_type_id); return buf; } char *drm_get_connector_status_name(enum drm_connector_status status) { if (status == connector_status_connected) return "connected"; else if (status == connector_status_disconnected) return "disconnected"; else return "unknown"; } /** * drm_mode_object_get - allocate a new identifier * @dev: DRM device * @ptr: object pointer, used to generate unique ID * @type: object type * * LOCKING: * * Create a unique identifier based on @ptr in @dev's identifier space. Used * for tracking modes, CRTCs and connectors. * * RETURNS: * New unique (relative to other objects in @dev) integer identifier for the * object. */ static int drm_mode_object_get(struct drm_device *dev, struct drm_mode_object *obj, uint32_t obj_type) { int new_id; int ret; new_id = 0; ret = drm_gem_name_create(&dev->mode_config.crtc_names, obj, &new_id); if (ret != 0) return (ret); obj->id = new_id; obj->type = obj_type; return 0; } /** * drm_mode_object_put - free an identifer * @dev: DRM device * @id: ID to free * * LOCKING: * Caller must hold DRM mode_config lock. * * Free @id from @dev's unique identifier pool. */ static void drm_mode_object_put(struct drm_device *dev, struct drm_mode_object *object) { drm_gem_names_remove(&dev->mode_config.crtc_names, object->id); } struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) { struct drm_mode_object *obj; obj = drm_gem_name_ref(&dev->mode_config.crtc_names, id, NULL); if (!obj || (obj->type != type) || (obj->id != id)) obj = NULL; return obj; } /** * drm_framebuffer_init - initialize a framebuffer * @dev: DRM device * * LOCKING: * Caller must hold mode config lock. * * Allocates an ID for the framebuffer's parent mode object, sets its mode * functions & device file and adds it to the master fd list. * * RETURNS: * Zero on success, error code on failure. 
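 *
 * A minimal usage sketch, not taken from this file: the wrapper type
 * my_framebuffer, the helper my_fb_register() and the funcs table
 * my_fb_funcs are hypothetical driver objects, and the LOCKING rule above
 * (mode config lock held by the caller) still applies.
 *
 *	struct my_framebuffer {
 *		struct drm_framebuffer	base;
 *		struct drm_gem_object	*obj;
 *	};
 *
 *	static int
 *	my_fb_register(struct drm_device *dev, struct my_framebuffer *my_fb)
 *	{
 *
 *		return (drm_framebuffer_init(dev, &my_fb->base, &my_fb_funcs));
 *	}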
*/ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { int ret; DRM_MODE_CONFIG_ASSERT_LOCKED(dev); ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); if (ret) return ret; fb->dev = dev; fb->funcs = funcs; dev->mode_config.num_fb++; list_add(&fb->head, &dev->mode_config.fb_list); return 0; } /** * drm_framebuffer_cleanup - remove a framebuffer object * @fb: framebuffer to remove * * LOCKING: * Caller must hold mode config lock. * * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes * it, setting it to NULL. */ void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; struct drm_crtc *crtc; struct drm_plane *plane; struct drm_mode_set set; int ret; DRM_MODE_CONFIG_ASSERT_LOCKED(dev); /* remove from any CRTC */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb == fb) { /* should turn off the crtc */ memset(&set, 0, sizeof(struct drm_mode_set)); set.crtc = crtc; set.fb = NULL; ret = crtc->funcs->set_config(&set); if (ret) DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); } } list_for_each_entry(plane, &dev->mode_config.plane_list, head) { if (plane->fb == fb) { /* should turn off the crtc */ ret = plane->funcs->disable_plane(plane); if (ret) DRM_ERROR("failed to disable plane with busy fb\n"); /* disconnect the plane from the fb and crtc: */ plane->fb = NULL; plane->crtc = NULL; } } drm_mode_object_put(dev, &fb->base); list_del(&fb->head); dev->mode_config.num_fb--; } /** * drm_crtc_init - Initialise a new CRTC object * @dev: DRM device * @crtc: CRTC object to init * @funcs: callbacks for the new CRTC * * LOCKING: - * Caller must hold mode config lock. + * Takes mode_config lock. * * Inits a new object created as base part of an driver crtc object. * * RETURNS: * Zero on success, error code on failure. */ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs) { int ret; crtc->dev = dev; crtc->funcs = funcs; sx_xlock(&dev->mode_config.mutex); ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); if (ret) goto out; + crtc->base.properties = &crtc->properties; + list_add_tail(&crtc->head, &dev->mode_config.crtc_list); dev->mode_config.num_crtc++; + out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_crtc_cleanup - Cleans up the core crtc usage. * @crtc: CRTC to cleanup * * LOCKING: * Caller must hold mode config lock. * * Cleanup @crtc. Removes from drm modesetting space * does NOT free object, caller does that. */ void drm_crtc_cleanup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; DRM_MODE_CONFIG_ASSERT_LOCKED(dev); if (crtc->gamma_store) { free(crtc->gamma_store, DRM_MEM_KMS); crtc->gamma_store = NULL; } drm_mode_object_put(dev, &crtc->base); list_del(&crtc->head); dev->mode_config.num_crtc--; } /** * drm_mode_probed_add - add a mode to a connector's probed mode list * @connector: connector the new mode * @mode: mode data * * LOCKING: * Caller must hold mode config lock. * * Add @mode to @connector's mode list for later use. */ void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode) { DRM_MODE_CONFIG_ASSERT_LOCKED(connector->dev); list_add(&mode->head, &connector->probed_modes); } /** * drm_mode_remove - remove and free a mode * @connector: connector list to modify * @mode: mode to remove * * LOCKING: * Caller must hold mode config lock. * * Remove @mode from @connector's mode list, then free it. 
*/ void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode) { DRM_MODE_CONFIG_ASSERT_LOCKED(connector->dev); list_del(&mode->head); drm_mode_destroy(connector->dev, mode); } /** * drm_connector_init - Init a preallocated connector * @dev: DRM device * @connector: the connector to init * @funcs: callbacks for this connector * @name: user visible name of the connector * * LOCKING: * Takes mode config lock. * * Initialises a preallocated connector. Connectors should be * subclassed as part of driver connector objects. * * RETURNS: * Zero on success, error code on failure. */ int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type) { int ret; sx_xlock(&dev->mode_config.mutex); ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); if (ret) goto out; + connector->base.properties = &connector->properties; connector->dev = dev; connector->funcs = funcs; connector->connector_type = connector_type; connector->connector_type_id = ++drm_connector_enum_list[connector_type].count; /* TODO */ INIT_LIST_HEAD(&connector->user_modes); INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); connector->edid_blob_ptr = NULL; list_add_tail(&connector->head, &dev->mode_config.connector_list); dev->mode_config.num_connector++; drm_connector_attach_property(connector, dev->mode_config.edid_property, 0); drm_connector_attach_property(connector, dev->mode_config.dpms_property, 0); out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_connector_cleanup - cleans up an initialised connector * @connector: connector to cleanup * * LOCKING: * Takes mode config lock. * * Cleans up the connector but doesn't free the object. 
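 *
 * Sketch of a driver connector ->destroy callback built on this helper
 * (my_connector_destroy is hypothetical and assumes the connector was
 * allocated from the DRM_MEM_KMS malloc type):
 *
 *	static void
 *	my_connector_destroy(struct drm_connector *connector)
 *	{
 *
 *		drm_connector_cleanup(connector);
 *		free(connector, DRM_MEM_KMS);
 *	}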
*/ void drm_connector_cleanup(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_display_mode *mode, *t; list_for_each_entry_safe(mode, t, &connector->probed_modes, head) drm_mode_remove(connector, mode); list_for_each_entry_safe(mode, t, &connector->modes, head) drm_mode_remove(connector, mode); list_for_each_entry_safe(mode, t, &connector->user_modes, head) drm_mode_remove(connector, mode); sx_xlock(&dev->mode_config.mutex); if (connector->edid_blob_ptr) drm_property_destroy_blob(dev, connector->edid_blob_ptr); drm_mode_object_put(dev, &connector->base); list_del(&connector->head); dev->mode_config.num_connector--; sx_xunlock(&dev->mode_config.mutex); } int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type) { int ret; sx_xlock(&dev->mode_config.mutex); ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); if (ret) goto out; encoder->dev = dev; encoder->encoder_type = encoder_type; encoder->funcs = funcs; list_add_tail(&encoder->head, &dev->mode_config.encoder_list); dev->mode_config.num_encoder++; out: sx_xunlock(&dev->mode_config.mutex); return ret; } void drm_encoder_cleanup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; sx_xlock(&dev->mode_config.mutex); drm_mode_object_put(dev, &encoder->base); list_del(&encoder->head); dev->mode_config.num_encoder--; sx_xunlock(&dev->mode_config.mutex); } int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, uint32_t format_count, bool priv) { int ret; sx_xlock(&dev->mode_config.mutex); ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE); if (ret) goto out; + plane->base.properties = &plane->properties; plane->dev = dev; plane->funcs = funcs; plane->format_types = malloc(sizeof(uint32_t) * format_count, DRM_MEM_KMS, M_WAITOK); memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); plane->format_count = format_count; plane->possible_crtcs = possible_crtcs; /* private planes are not exposed to userspace, but depending on * display hardware, might be convenient to allow sharing programming * for the scanout engine with the crtc implementation. */ if (!priv) { list_add_tail(&plane->head, &dev->mode_config.plane_list); dev->mode_config.num_plane++; } else { INIT_LIST_HEAD(&plane->head); } out: sx_xunlock(&dev->mode_config.mutex); return ret; } void drm_plane_cleanup(struct drm_plane *plane) { struct drm_device *dev = plane->dev; sx_xlock(&dev->mode_config.mutex); free(plane->format_types, DRM_MEM_KMS); drm_mode_object_put(dev, &plane->base); /* if not added to a list, it must be a private plane */ if (!list_empty(&plane->head)) { list_del(&plane->head); dev->mode_config.num_plane--; } sx_xunlock(&dev->mode_config.mutex); } /** * drm_mode_create - create a new display mode * @dev: DRM device * * LOCKING: * Caller must hold DRM mode_config lock. * * Create a new drm_display_mode, give it an ID, and return it. * * RETURNS: * Pointer to new mode on success, NULL on error. 
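 *
 * Hedged sketch of use from a connector's fill_modes/get_modes path, where
 * connector and dev come from the surrounding hook; the timings are
 * placeholders, real drivers derive them from EDID or fixed panel data:
 *
 *	struct drm_display_mode *mode;
 *
 *	mode = drm_mode_create(dev);
 *	if (mode == NULL)
 *		return (0);
 *	mode->hdisplay = 1024;
 *	mode->vdisplay = 768;
 *	drm_mode_probed_add(connector, mode);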
*/ struct drm_display_mode *drm_mode_create(struct drm_device *dev) { struct drm_display_mode *nmode; nmode = malloc(sizeof(struct drm_display_mode), DRM_MEM_KMS, M_WAITOK | M_ZERO); if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) { free(nmode, DRM_MEM_KMS); return (NULL); } return nmode; } /** * drm_mode_destroy - remove a mode * @dev: DRM device * @mode: mode to remove * * LOCKING: * Caller must hold mode config lock. * * Free @mode's unique identifier, then free it. */ void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) { if (!mode) return; drm_mode_object_put(dev, &mode->base); free(mode, DRM_MEM_KMS); } static int drm_mode_create_standard_connector_properties(struct drm_device *dev) { struct drm_property *edid; struct drm_property *dpms; /* * Standard properties (apply to all connectors) */ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB | DRM_MODE_PROP_IMMUTABLE, "EDID", 0); dev->mode_config.edid_property = edid; dpms = drm_property_create_enum(dev, 0, "DPMS", drm_dpms_enum_list, DRM_ARRAY_SIZE(drm_dpms_enum_list)); dev->mode_config.dpms_property = dpms; return 0; } /** * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties * @dev: DRM device * * Called by a driver the first time a DVI-I connector is made. */ int drm_mode_create_dvi_i_properties(struct drm_device *dev) { struct drm_property *dvi_i_selector; struct drm_property *dvi_i_subconnector; if (dev->mode_config.dvi_i_select_subconnector_property) return 0; dvi_i_selector = drm_property_create_enum(dev, 0, "select subconnector", drm_dvi_i_select_enum_list, DRM_ARRAY_SIZE(drm_dvi_i_select_enum_list)); dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "subconnector", drm_dvi_i_subconnector_enum_list, DRM_ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; return 0; } /** * drm_create_tv_properties - create TV specific connector properties * @dev: DRM device * @num_modes: number of different TV formats (modes) supported * @modes: array of pointers to strings containing name of each format * * Called by a driver's TV initialization routine, this function creates * the TV specific connector properties for a given device. Caller is * responsible for allocating a list of format names and passing them to * this routine. */ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, char *modes[]) { struct drm_property *tv_selector; struct drm_property *tv_subconnector; int i; if (dev->mode_config.tv_select_subconnector_property) return 0; /* * Basic connector properties */ tv_selector = drm_property_create_enum(dev, 0, "select subconnector", drm_tv_select_enum_list, DRM_ARRAY_SIZE(drm_tv_select_enum_list)); dev->mode_config.tv_select_subconnector_property = tv_selector; tv_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "subconnector", drm_tv_subconnector_enum_list, DRM_ARRAY_SIZE(drm_tv_subconnector_enum_list)); dev->mode_config.tv_subconnector_property = tv_subconnector; /* * Other, TV specific properties: margins & TV modes. 
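 * Each of the margin and picture controls below is exposed as a 0-100
 * range property; the "mode" property is an enum built from the caller's
 * num_modes name strings.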
*/ dev->mode_config.tv_left_margin_property = drm_property_create_range(dev, 0, "left margin", 0, 100); dev->mode_config.tv_right_margin_property = drm_property_create_range(dev, 0, "right margin", 0, 100); dev->mode_config.tv_top_margin_property = drm_property_create_range(dev, 0, "top margin", 0, 100); dev->mode_config.tv_bottom_margin_property = drm_property_create_range(dev, 0, "bottom margin", 0, 100); dev->mode_config.tv_mode_property = drm_property_create(dev, DRM_MODE_PROP_ENUM, "mode", num_modes); for (i = 0; i < num_modes; i++) drm_property_add_enum(dev->mode_config.tv_mode_property, i, i, modes[i]); dev->mode_config.tv_brightness_property = drm_property_create_range(dev, 0, "brightness", 0, 100); dev->mode_config.tv_contrast_property = drm_property_create_range(dev, 0, "contrast", 0, 100); dev->mode_config.tv_flicker_reduction_property = drm_property_create_range(dev, 0, "flicker reduction", 0, 100); dev->mode_config.tv_overscan_property = drm_property_create_range(dev, 0, "overscan", 0, 100); dev->mode_config.tv_saturation_property = drm_property_create_range(dev, 0, "saturation", 0, 100); dev->mode_config.tv_hue_property = drm_property_create_range(dev, 0, "hue", 0, 100); return 0; } /** * drm_mode_create_scaling_mode_property - create scaling mode property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. */ int drm_mode_create_scaling_mode_property(struct drm_device *dev) { struct drm_property *scaling_mode; if (dev->mode_config.scaling_mode_property) return 0; scaling_mode = drm_property_create_enum(dev, 0, "scaling mode", drm_scaling_mode_enum_list, DRM_ARRAY_SIZE(drm_scaling_mode_enum_list)); dev->mode_config.scaling_mode_property = scaling_mode; return 0; } /** * drm_mode_create_dithering_property - create dithering property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. */ int drm_mode_create_dithering_property(struct drm_device *dev) { struct drm_property *dithering_mode; if (dev->mode_config.dithering_mode_property) return 0; dithering_mode = drm_property_create_enum(dev, 0, "dithering", drm_dithering_mode_enum_list, DRM_ARRAY_SIZE(drm_dithering_mode_enum_list)); dev->mode_config.dithering_mode_property = dithering_mode; return 0; } /** * drm_mode_create_dirty_property - create dirty property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. */ int drm_mode_create_dirty_info_property(struct drm_device *dev) { struct drm_property *dirty_info; if (dev->mode_config.dirty_info_property) return 0; dirty_info = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "dirty", drm_dirty_info_enum_list, DRM_ARRAY_SIZE(drm_dirty_info_enum_list)); dev->mode_config.dirty_info_property = dirty_info; return 0; } /** * drm_mode_config_init - initialize DRM mode_configuration structure * @dev: DRM device * * LOCKING: * None, should happen single threaded at init time. * * Initialize @dev's mode_config structure, used for tracking the graphics * configuration of @dev. 
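 *
 * Typical call order in a KMS driver's load path (sketch only; the limits
 * shown are arbitrary examples, not defaults):
 *
 *	drm_mode_config_init(dev);
 *	dev->mode_config.min_width = 0;
 *	dev->mode_config.min_height = 0;
 *	dev->mode_config.max_width = 8192;
 *	dev->mode_config.max_height = 8192;
 *
 * after which the driver registers its CRTCs, encoders and connectors via
 * drm_crtc_init(), drm_encoder_init() and drm_connector_init().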
*/ void drm_mode_config_init(struct drm_device *dev) { sx_init(&dev->mode_config.mutex, "kmslk"); INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); INIT_LIST_HEAD(&dev->mode_config.encoder_list); INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); INIT_LIST_HEAD(&dev->mode_config.plane_list); drm_gem_names_init(&dev->mode_config.crtc_names); sx_xlock(&dev->mode_config.mutex); drm_mode_create_standard_connector_properties(dev); sx_xunlock(&dev->mode_config.mutex); /* Just to be sure */ dev->mode_config.num_fb = 0; dev->mode_config.num_connector = 0; dev->mode_config.num_crtc = 0; dev->mode_config.num_encoder = 0; } static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group) { uint32_t total_objects = 0; total_objects += dev->mode_config.num_crtc; total_objects += dev->mode_config.num_connector; total_objects += dev->mode_config.num_encoder; group->id_list = malloc(total_objects * sizeof(uint32_t), DRM_MEM_KMS, M_WAITOK | M_ZERO); group->num_crtcs = 0; group->num_connectors = 0; group->num_encoders = 0; return 0; } int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group) { struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; int ret; if ((ret = drm_mode_group_init(dev, group))) return ret; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) group->id_list[group->num_crtcs++] = crtc->base.id; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) group->id_list[group->num_crtcs + group->num_encoders++] = encoder->base.id; list_for_each_entry(connector, &dev->mode_config.connector_list, head) group->id_list[group->num_crtcs + group->num_encoders + group->num_connectors++] = connector->base.id; return 0; } /** * drm_mode_config_cleanup - free up DRM mode_config info * @dev: DRM device * * LOCKING: * Caller must hold mode config lock. * * Free up all the connectors and CRTCs associated with this DRM device, then * free up the framebuffers and associated buffer objects. * * FIXME: cleanup any dangling user buffer objects too */ void drm_mode_config_cleanup(struct drm_device *dev) { struct drm_connector *connector, *ot; struct drm_crtc *crtc, *ct; struct drm_encoder *encoder, *enct; struct drm_framebuffer *fb, *fbt; struct drm_property *property, *pt; struct drm_plane *plane, *plt; list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list, head) { encoder->funcs->destroy(encoder); } list_for_each_entry_safe(connector, ot, &dev->mode_config.connector_list, head) { connector->funcs->destroy(connector); } list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) { drm_property_destroy(dev, property); } list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { fb->funcs->destroy(fb); } list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { crtc->funcs->destroy(crtc); } list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, head) { plane->funcs->destroy(plane); } drm_gem_names_fini(&dev->mode_config.crtc_names); } /** * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo * @out: drm_mode_modeinfo struct to return to the user * @in: drm_display_mode to use * * LOCKING: * None. * * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to * the user. 
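 *
 * Timing values wider than 16 bits cannot be represented in struct
 * drm_mode_modeinfo, hence the USHRT_MAX checks below; note that the
 * conversion only warns about such values, it does not clamp them.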
*/ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, const struct drm_display_mode *in) { if (in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX || in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX || in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX || in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX || in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX) printf("timing values too large for mode info\n"); out->clock = in->clock; out->hdisplay = in->hdisplay; out->hsync_start = in->hsync_start; out->hsync_end = in->hsync_end; out->htotal = in->htotal; out->hskew = in->hskew; out->vdisplay = in->vdisplay; out->vsync_start = in->vsync_start; out->vsync_end = in->vsync_end; out->vtotal = in->vtotal; out->vscan = in->vscan; out->vrefresh = in->vrefresh; out->flags = in->flags; out->type = in->type; strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; } /** * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode * @out: drm_display_mode to return to the user * @in: drm_mode_modeinfo to use * * LOCKING: * None. * * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to * the caller. * * RETURNS: * Zero on success, errno on failure. */ static int drm_crtc_convert_umode(struct drm_display_mode *out, const struct drm_mode_modeinfo *in) { if (in->clock > INT_MAX || in->vrefresh > INT_MAX) return ERANGE; out->clock = in->clock; out->hdisplay = in->hdisplay; out->hsync_start = in->hsync_start; out->hsync_end = in->hsync_end; out->htotal = in->htotal; out->hskew = in->hskew; out->vdisplay = in->vdisplay; out->vsync_start = in->vsync_start; out->vsync_end = in->vsync_end; out->vtotal = in->vtotal; out->vscan = in->vscan; out->vrefresh = in->vrefresh; out->flags = in->flags; out->type = in->type; strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; return 0; } /** * drm_mode_getresources - get graphics configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Construct a set of configuration description structures and return * them to the user, including CRTC, connector and framebuffer configuration. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
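 *
 * From userspace this is normally reached through libdrm's
 * drmModeGetResources(); the raw two-pass pattern looks roughly like the
 * sketch below (illustrative only, with error handling and the fb,
 * encoder and connector ID arrays omitted):
 *
 *	struct drm_mode_card_res res;
 *	uint32_t *crtcs;
 *
 *	memset(&res, 0, sizeof(res));
 *	ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);
 *	crtcs = calloc(res.count_crtcs, sizeof(*crtcs));
 *	res.crtc_id_ptr = (uintptr_t)crtcs;
 *	res.count_fbs = res.count_connectors = res.count_encoders = 0;
 *	ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);
 *
 * The first call returns only the counts; the second fills the CRTC ID
 * array (the other ID arrays are requested the same way).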
*/ int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_card_res *card_res = data; struct list_head *lh; struct drm_framebuffer *fb; struct drm_connector *connector; struct drm_crtc *crtc; struct drm_encoder *encoder; int ret = 0; int connector_count = 0; int crtc_count = 0; int fb_count = 0; int encoder_count = 0; int copied = 0, i; uint32_t __user *fb_id; uint32_t __user *crtc_id; uint32_t __user *connector_id; uint32_t __user *encoder_id; struct drm_mode_group *mode_group; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); /* * For the non-control nodes we need to limit the list of resources * by IDs in the group list for this node */ list_for_each(lh, &file_priv->fbs) fb_count++; #if 1 mode_group = NULL; /* XXXKIB */ if (1 || file_priv->master) { #else mode_group = &file_priv->masterp->minor->mode_group; if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) { #endif list_for_each(lh, &dev->mode_config.crtc_list) crtc_count++; list_for_each(lh, &dev->mode_config.connector_list) connector_count++; list_for_each(lh, &dev->mode_config.encoder_list) encoder_count++; } else { crtc_count = mode_group->num_crtcs; connector_count = mode_group->num_connectors; encoder_count = mode_group->num_encoders; } card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; card_res->max_width = dev->mode_config.max_width; card_res->min_width = dev->mode_config.min_width; /* handle this in 4 parts */ /* FBs */ if (card_res->count_fbs >= fb_count) { copied = 0; fb_id = (uint32_t *)(uintptr_t)card_res->fb_id_ptr; list_for_each_entry(fb, &file_priv->fbs, filp_head) { if (copyout(&fb->base.id, fb_id + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } card_res->count_fbs = fb_count; /* CRTCs */ if (card_res->count_crtcs >= crtc_count) { copied = 0; crtc_id = (uint32_t *)(uintptr_t)card_res->crtc_id_ptr; #if 1 if (1 || file_priv->master) { #else if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) { #endif list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (copyout(&crtc->base.id, crtc_id + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } else { for (i = 0; i < mode_group->num_crtcs; i++) { if (copyout(&mode_group->id_list[i], crtc_id + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } } card_res->count_crtcs = crtc_count; /* Encoders */ if (card_res->count_encoders >= encoder_count) { copied = 0; encoder_id = (uint32_t *)(uintptr_t)card_res->encoder_id_ptr; #if 1 if (file_priv->master) { #else if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) { #endif list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id, drm_get_encoder_name(encoder)); if (copyout(&encoder->base.id, encoder_id + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } else { for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) { if (copyout(&mode_group->id_list[i], encoder_id + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } } card_res->count_encoders = encoder_count; /* Connectors */ if (card_res->count_connectors >= connector_count) { copied = 0; connector_id = (uint32_t *)(uintptr_t)card_res->connector_id_ptr; #if 1 if (file_priv->master) { #else if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) { #endif 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); if (copyout(&connector->base.id, connector_id + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } else { int start = mode_group->num_crtcs + mode_group->num_encoders; for (i = start; i < start + mode_group->num_connectors; i++) { if (copyout(&mode_group->id_list[i], connector_id + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } } card_res->count_connectors = connector_count; DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs, card_res->count_connectors, card_res->count_encoders); out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getcrtc - get CRTC configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Construct a CRTC configuration structure to return to the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_getcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc *crtc_resp = data; struct drm_crtc *crtc; struct drm_mode_object *obj; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_resp->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = (EINVAL); goto out; } crtc = obj_to_crtc(obj); crtc_resp->x = crtc->x; crtc_resp->y = crtc->y; crtc_resp->gamma_size = crtc->gamma_size; if (crtc->fb) crtc_resp->fb_id = crtc->fb->base.id; else crtc_resp->fb_id = 0; if (crtc->enabled) { drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); crtc_resp->mode_valid = 1; } else { crtc_resp->mode_valid = 0; } out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getconnector - get connector configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Construct a connector configuration structure to return to the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_connector *out_resp = data; struct drm_mode_object *obj; struct drm_connector *connector; struct drm_display_mode *mode; int mode_count = 0; int props_count = 0; int encoders_count = 0; int ret = 0; int copied = 0; int i; struct drm_mode_modeinfo u_mode; struct drm_mode_modeinfo __user *mode_ptr; uint32_t *prop_ptr; uint64_t *prop_values; uint32_t *encoder_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { ret = EINVAL; goto out; } connector = obj_to_connector(obj); - for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { - if (connector->property_ids[i] != 0) { - props_count++; - } - } + props_count = connector->properties.count; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] != 0) { encoders_count++; } } if (out_resp->count_modes == 0) { connector->funcs->fill_modes(connector, dev->mode_config.max_width, dev->mode_config.max_height); } /* delayed so we get modes regardless of pre-fill_modes state */ list_for_each_entry(mode, &connector->modes, head) mode_count++; out_resp->connector_id = connector->base.id; out_resp->connector_type = connector->connector_type; out_resp->connector_type_id = connector->connector_type_id; out_resp->mm_width = connector->display_info.width_mm; out_resp->mm_height = connector->display_info.height_mm; out_resp->subpixel = connector->display_info.subpixel_order; out_resp->connection = connector->status; if (connector->encoder) out_resp->encoder_id = connector->encoder->base.id; else out_resp->encoder_id = 0; /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. 
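/*
 * Editorial sketch, not part of this change: props_count above now comes
 * from the generic per-object property table this patch introduces (struct
 * drm_object_properties in the drm_crtc.h hunk further down), which keeps
 * parallel arrays of property IDs and current values plus a count.  The
 * standalone fragment below mirrors how drm_object_property_get_value()
 * walks that table; the names are local to the example, only the layout is
 * taken from the patch.
 */
#include <stdint.h>

#define EX_MAX_PROPERTY	16		/* mirrors DRM_OBJECT_MAX_PROPERTY */

struct ex_object_properties {
	int		count;
	uint32_t	ids[EX_MAX_PROPERTY];
	uint64_t	values[EX_MAX_PROPERTY];
};

/* Return 0 and the stored value when prop_id is attached, -1 otherwise. */
static int
ex_property_get(const struct ex_object_properties *props, uint32_t prop_id,
    uint64_t *valp)
{
	int i;

	for (i = 0; i < props->count; i++) {
		if (props->ids[i] == prop_id) {
			*valp = props->values[i];
			return (0);
		}
	}
	return (-1);
}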
*/ if ((out_resp->count_modes >= mode_count) && mode_count) { copied = 0; mode_ptr = (struct drm_mode_modeinfo *)(uintptr_t)out_resp->modes_ptr; list_for_each_entry(mode, &connector->modes, head) { drm_crtc_convert_to_umode(&u_mode, mode); if (copyout(&u_mode, mode_ptr + copied, sizeof(u_mode))) { ret = EFAULT; goto out; } copied++; } } out_resp->count_modes = mode_count; if ((out_resp->count_props >= props_count) && props_count) { copied = 0; prop_ptr = (uint32_t *)(uintptr_t)(out_resp->props_ptr); prop_values = (uint64_t *)(uintptr_t)(out_resp->prop_values_ptr); - for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { - if (connector->property_ids[i] != 0) { - if (copyout(&connector->property_ids[i], - prop_ptr + copied, sizeof(uint32_t))) { - ret = EFAULT; - goto out; - } + for (i = 0; i < connector->properties.count; i++) { + if (copyout(&connector->properties.ids[i], + prop_ptr + copied, sizeof(uint32_t))) { + ret = EFAULT; + goto out; + } - if (copyout(&connector->property_values[i], - prop_values + copied, sizeof(uint64_t))) { - ret = EFAULT; - goto out; - } - copied++; + if (copyout(&connector->properties.values[i], + prop_values + copied, sizeof(uint64_t))) { + ret = EFAULT; + goto out; } + copied++; } } out_resp->count_props = props_count; if ((out_resp->count_encoders >= encoders_count) && encoders_count) { copied = 0; encoder_ptr = (uint32_t *)(uintptr_t)(out_resp->encoders_ptr); for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] != 0) { if (copyout(&connector->encoder_ids[i], encoder_ptr + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } } out_resp->count_encoders = encoders_count; out: sx_xunlock(&dev->mode_config.mutex); return ret; } int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_encoder *enc_resp = data; struct drm_mode_object *obj; struct drm_encoder *encoder; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, enc_resp->encoder_id, DRM_MODE_OBJECT_ENCODER); if (!obj) { ret = EINVAL; goto out; } encoder = obj_to_encoder(obj); if (encoder->crtc) enc_resp->crtc_id = encoder->crtc->base.id; else enc_resp->crtc_id = 0; enc_resp->encoder_type = encoder->encoder_type; enc_resp->encoder_id = encoder->base.id; enc_resp->possible_crtcs = encoder->possible_crtcs; enc_resp->possible_clones = encoder->possible_clones; out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getplane_res - get plane info * @dev: DRM device * @data: ioctl data * @file_priv: DRM file info * * LOCKING: * Takes mode config lock. * * Return an plane count and set of IDs. */ int drm_mode_getplane_res(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_plane_res *plane_resp = data; struct drm_mode_config *config; struct drm_plane *plane; uint32_t *plane_ptr; int copied = 0, ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); config = &dev->mode_config; /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. 
*/ if (config->num_plane && (plane_resp->count_planes >= config->num_plane)) { plane_ptr = (uint32_t *)(unsigned long)plane_resp->plane_id_ptr; list_for_each_entry(plane, &config->plane_list, head) { if (copyout(&plane->base.id, plane_ptr + copied, sizeof(uint32_t))) { ret = EFAULT; goto out; } copied++; } } plane_resp->count_planes = config->num_plane; out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getplane - get plane info * @dev: DRM device * @data: ioctl data * @file_priv: DRM file info * * LOCKING: * Takes mode config lock. * * Return plane info, including formats supported, gamma size, any * current fb, etc. */ int drm_mode_getplane(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_plane *plane_resp = data; struct drm_mode_object *obj; struct drm_plane *plane; uint32_t *format_ptr; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, plane_resp->plane_id, DRM_MODE_OBJECT_PLANE); if (!obj) { ret = ENOENT; goto out; } plane = obj_to_plane(obj); if (plane->crtc) plane_resp->crtc_id = plane->crtc->base.id; else plane_resp->crtc_id = 0; if (plane->fb) plane_resp->fb_id = plane->fb->base.id; else plane_resp->fb_id = 0; plane_resp->plane_id = plane->base.id; plane_resp->possible_crtcs = plane->possible_crtcs; plane_resp->gamma_size = plane->gamma_size; /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. */ if (plane->format_count && (plane_resp->count_format_types >= plane->format_count)) { format_ptr = (uint32_t *)(unsigned long)plane_resp->format_type_ptr; if (copyout(format_ptr, plane->format_types, sizeof(uint32_t) * plane->format_count)) { ret = EFAULT; goto out; } } plane_resp->count_format_types = plane->format_count; out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_setplane - set up or tear down an plane * @dev: DRM device * @data: ioctl data* * @file_prive: DRM file info * * LOCKING: * Takes mode config lock. * * Set plane info, including placement, fb, scaling, and other factors. * Or pass a NULL fb to disable. */ int drm_mode_setplane(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_set_plane *plane_req = data; struct drm_mode_object *obj; struct drm_plane *plane; struct drm_crtc *crtc; struct drm_framebuffer *fb; int ret = 0; unsigned int fb_width, fb_height; int i; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); /* * First, find the plane, crtc, and fb objects. If not available, * we don't bother to call the driver. */ obj = drm_mode_object_find(dev, plane_req->plane_id, DRM_MODE_OBJECT_PLANE); if (!obj) { DRM_DEBUG_KMS("Unknown plane ID %d\n", plane_req->plane_id); ret = ENOENT; goto out; } plane = obj_to_plane(obj); /* No fb means shut it down */ if (!plane_req->fb_id) { plane->funcs->disable_plane(plane); plane->crtc = NULL; plane->fb = NULL; goto out; } obj = drm_mode_object_find(dev, plane_req->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_DEBUG_KMS("Unknown crtc ID %d\n", plane_req->crtc_id); ret = ENOENT; goto out; } crtc = obj_to_crtc(obj); obj = drm_mode_object_find(dev, plane_req->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", plane_req->fb_id); ret = ENOENT; goto out; } fb = obj_to_fb(obj); /* Check whether this plane supports the fb pixel format. 
*/ for (i = 0; i < plane->format_count; i++) if (fb->pixel_format == plane->format_types[i]) break; if (i == plane->format_count) { DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format); ret = EINVAL; goto out; } fb_width = fb->width << 16; fb_height = fb->height << 16; /* Make sure source coordinates are inside the fb. */ if (plane_req->src_w > fb_width || plane_req->src_x > fb_width - plane_req->src_w || plane_req->src_h > fb_height || plane_req->src_y > fb_height - plane_req->src_h) { DRM_DEBUG_KMS("Invalid source coordinates " "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", plane_req->src_w >> 16, ((plane_req->src_w & 0xffff) * 15625) >> 10, plane_req->src_h >> 16, ((plane_req->src_h & 0xffff) * 15625) >> 10, plane_req->src_x >> 16, ((plane_req->src_x & 0xffff) * 15625) >> 10, plane_req->src_y >> 16, ((plane_req->src_y & 0xffff) * 15625) >> 10); ret = ENOSPC; goto out; } /* Give drivers some help against integer overflows */ if (plane_req->crtc_w > INT_MAX || plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w || plane_req->crtc_h > INT_MAX || plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) { DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", plane_req->crtc_w, plane_req->crtc_h, plane_req->crtc_x, plane_req->crtc_y); ret = ERANGE; goto out; } ret = -plane->funcs->update_plane(plane, crtc, fb, plane_req->crtc_x, plane_req->crtc_y, plane_req->crtc_w, plane_req->crtc_h, plane_req->src_x, plane_req->src_y, plane_req->src_w, plane_req->src_h); if (!ret) { plane->crtc = crtc; plane->fb = fb; } out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_setcrtc - set CRTC configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Build a new CRTC configuration based on user request. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_setcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_mode_crtc *crtc_req = data; struct drm_mode_object *obj; struct drm_crtc *crtc; struct drm_connector **connector_set = NULL, *connector; struct drm_framebuffer *fb = NULL; struct drm_display_mode *mode = NULL; struct drm_mode_set set; uint32_t *set_connectors_ptr; - int ret = 0; + int ret; int i; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); /* For some reason crtc x/y offsets are signed internally. */ if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) return (ERANGE); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_req->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); ret = EINVAL; goto out; } crtc = obj_to_crtc(obj); DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer. 
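/*
 * Editorial sketch, not part of this change: the plane source rectangle
 * checked above (src_x/y/w/h) is in 16.16 fixed point, which is why the
 * framebuffer size is scaled by << 16 before the comparison.  The debug
 * message converts the fractional part to millionths with
 * (frac * 15625) >> 10, exactly frac * 1000000 / 65536.  Standalone
 * illustration with example names:
 */
#include <stdint.h>
#include <stdio.h>

/* Pack an integer pixel count into 16.16 fixed point. */
static inline uint32_t
ex_to_fixed16(uint32_t pixels)
{
	return (pixels << 16);
}

/* Print a 16.16 value as "int.millionths", like the DRM_DEBUG_KMS above. */
static void
ex_print_fixed16(uint32_t v)
{
	printf("%u.%06u\n", v >> 16, ((v & 0xffff) * 15625) >> 10);
}

int
main(void)
{
	ex_print_fixed16(ex_to_fixed16(1920));		/* 1920.000000 */
	ex_print_fixed16((640 << 16) | 0x8000);		/* 640.500000 */
	return (0);
}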
*/ /* If we pass -1, set the mode with the currently bound fb */ if (crtc_req->fb_id == -1) { if (!crtc->fb) { DRM_DEBUG_KMS("CRTC doesn't have current FB\n"); ret = -EINVAL; goto out; } fb = crtc->fb; } else { obj = drm_mode_object_find(dev, crtc_req->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { DRM_DEBUG_KMS("Unknown FB ID%d\n", crtc_req->fb_id); ret = EINVAL; goto out; } fb = obj_to_fb(obj); } mode = drm_mode_create(dev); if (!mode) { ret = ENOMEM; goto out; } ret = drm_crtc_convert_umode(mode, &crtc_req->mode); if (ret) { DRM_DEBUG_KMS("Invalid mode\n"); goto out; } drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); if (mode->hdisplay > fb->width || mode->vdisplay > fb->height || crtc_req->x > fb->width - mode->hdisplay || crtc_req->y > fb->height - mode->vdisplay) { DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n", mode->hdisplay, mode->vdisplay, crtc_req->x, crtc_req->y, fb->width, fb->height); ret = ENOSPC; goto out; } } if (crtc_req->count_connectors == 0 && mode) { DRM_DEBUG_KMS("Count connectors is 0 but mode set\n"); ret = EINVAL; goto out; } if (crtc_req->count_connectors > 0 && (!mode || !fb)) { DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n", crtc_req->count_connectors); ret = EINVAL; goto out; } if (crtc_req->count_connectors > 0) { u32 out_id; /* Avoid unbounded kernel memory allocation */ if (crtc_req->count_connectors > config->num_connector) { ret = EINVAL; goto out; } connector_set = malloc(crtc_req->count_connectors * sizeof(struct drm_connector *), DRM_MEM_KMS, M_WAITOK); for (i = 0; i < crtc_req->count_connectors; i++) { set_connectors_ptr = (uint32_t *)(uintptr_t)crtc_req->set_connectors_ptr; if (copyin(&set_connectors_ptr[i], &out_id, sizeof(uint32_t))) { ret = EFAULT; goto out; } obj = drm_mode_object_find(dev, out_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { DRM_DEBUG_KMS("Connector id %d unknown\n", out_id); ret = EINVAL; goto out; } connector = obj_to_connector(obj); DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); connector_set[i] = connector; } } set.crtc = crtc; set.x = crtc_req->x; set.y = crtc_req->y; set.mode = mode; set.connectors = connector_set; set.num_connectors = crtc_req->count_connectors; set.fb = fb; ret = crtc->funcs->set_config(&set); out: free(connector_set, DRM_MEM_KMS); drm_mode_destroy(dev, mode); sx_xunlock(&dev->mode_config.mutex); return ret; } int drm_mode_cursor_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_cursor *req = data; struct drm_mode_object *obj; struct drm_crtc *crtc; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); if (!req->flags) return (EINVAL); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); ret = EINVAL; goto out; } crtc = obj_to_crtc(obj); if (req->flags & DRM_MODE_CURSOR_BO) { if (!crtc->funcs->cursor_set) { ret = ENXIO; goto out; } /* Turns off the cursor if handle is 0 */ ret = -crtc->funcs->cursor_set(crtc, file_priv, req->handle, req->width, req->height); } if (req->flags & DRM_MODE_CURSOR_MOVE) { if (crtc->funcs->cursor_move) { ret = crtc->funcs->cursor_move(crtc, req->x, req->y); } else { ret = EFAULT; goto out; } } out: sx_xunlock(&dev->mode_config.mutex); return ret; } /* Original addfb only supported RGB formats, so figure out which one */ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) { uint32_t fmt; switch (bpp) { case 8: 
fmt = DRM_FORMAT_RGB332; break; case 16: if (depth == 15) fmt = DRM_FORMAT_XRGB1555; else fmt = DRM_FORMAT_RGB565; break; case 24: fmt = DRM_FORMAT_RGB888; break; case 32: if (depth == 24) fmt = DRM_FORMAT_XRGB8888; else if (depth == 30) fmt = DRM_FORMAT_XRGB2101010; else fmt = DRM_FORMAT_ARGB8888; break; default: DRM_ERROR("bad bpp, assuming RGB24 pixel format\n"); fmt = DRM_FORMAT_XRGB8888; break; } return fmt; } /** * drm_mode_addfb - add an FB to the graphics configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Add a new FB to the specified CRTC, given a user request. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_addfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd *or = data; struct drm_mode_fb_cmd2 r = {}; struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; int ret = 0; /* Use new struct with format internally */ r.fb_id = or->fb_id; r.width = or->width; r.height = or->height; r.pitches[0] = or->pitch; r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); r.handles[0] = or->handle; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); if ((config->min_width > r.width) || (r.width > config->max_width)) return (EINVAL); if ((config->min_height > r.height) || (r.height > config->max_height)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); ret = -dev->mode_config.funcs->fb_create(dev, file_priv, &r, &fb); if (ret != 0) { - DRM_ERROR("could not create framebuffer, error %d\n", ret); + DRM_DEBUG_KMS("could not create framebuffer, error %d\n", ret); goto out; } or->fb_id = fb->base.id; list_add(&fb->filp_head, &file_priv->fbs); DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); out: sx_xunlock(&dev->mode_config.mutex); return ret; } static int format_check(struct drm_mode_fb_cmd2 *r) { uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN; switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB332: case DRM_FORMAT_BGR233: case DRM_FORMAT_XRGB4444: case DRM_FORMAT_XBGR4444: case DRM_FORMAT_RGBX4444: case DRM_FORMAT_BGRX4444: case DRM_FORMAT_ARGB4444: case DRM_FORMAT_ABGR4444: case DRM_FORMAT_RGBA4444: case DRM_FORMAT_BGRA4444: case DRM_FORMAT_XRGB1555: case DRM_FORMAT_XBGR1555: case DRM_FORMAT_RGBX5551: case DRM_FORMAT_BGRX5551: case DRM_FORMAT_ARGB1555: case DRM_FORMAT_ABGR1555: case DRM_FORMAT_RGBA5551: case DRM_FORMAT_BGRA5551: case DRM_FORMAT_RGB565: case DRM_FORMAT_BGR565: case DRM_FORMAT_RGB888: case DRM_FORMAT_BGR888: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_RGBX8888: case DRM_FORMAT_BGRX8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_RGBA8888: case DRM_FORMAT_BGRA8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_RGBX1010102: case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_RGBA1010102: case DRM_FORMAT_BGRA1010102: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_AYUV: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: case DRM_FORMAT_YUV411: case DRM_FORMAT_YVU411: case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV444: case DRM_FORMAT_YVU444: return 0; default: return (EINVAL); } } +static int 
framebuffer_check(struct drm_mode_fb_cmd2 *r) +{ + int ret, hsub, vsub, num_planes, i; + + ret = format_check(r); + if (ret) { + DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format); + return ret; + } + + hsub = drm_format_horz_chroma_subsampling(r->pixel_format); + vsub = drm_format_vert_chroma_subsampling(r->pixel_format); + num_planes = drm_format_num_planes(r->pixel_format); + + if (r->width == 0 || r->width % hsub) { + DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height); + return -EINVAL; + } + + if (r->height == 0 || r->height % vsub) { + DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height); + return -EINVAL; + } + + for (i = 0; i < num_planes; i++) { + unsigned int width = r->width / (i != 0 ? hsub : 1); + + if (!r->handles[i]) { + DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); + return -EINVAL; + } + + if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) { + DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); + return -EINVAL; + } + } + + return 0; +} + /** * drm_mode_addfb2 - add an FB to the graphics configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Add a new FB to the specified CRTC, given a user request with format. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd2 *r = data; struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); if ((config->min_width > r->width) || (r->width > config->max_width)) { - DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n", + DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", r->width, config->min_width, config->max_width); return (EINVAL); } if ((config->min_height > r->height) || (r->height > config->max_height)) { - DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n", + DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", r->height, config->min_height, config->max_height); return (EINVAL); } - ret = format_check(r); - if (ret) { - DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format); - return ret; - } + ret = framebuffer_check(r); + if (ret) + return -ret; sx_xlock(&dev->mode_config.mutex); /* TODO check buffer is sufficiently large */ /* TODO setup destructor callback */ ret = -dev->mode_config.funcs->fb_create(dev, file_priv, r, &fb); if (ret != 0) { - DRM_ERROR("could not create framebuffer, error %d\n", ret); + DRM_DEBUG_KMS("could not create framebuffer, error %d\n", ret); goto out; } r->fb_id = fb->base.id; list_add(&fb->filp_head, &file_priv->fbs); DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); out: sx_xunlock(&dev->mode_config.mutex); return (ret); } /** * drm_mode_rmfb - remove an FB from the configuration * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Remove the FB specified by the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
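/*
 * Editorial worked example, not part of this change: in framebuffer_check()
 * above, the minimum pitch of plane i is cpp(i) * (width / (i ? hsub : 1)).
 * With the helpers this patch adds (drm_format_num_planes, plane_cpp and
 * the chroma subsampling lookups), a 1920x1080 NV12 framebuffer must have
 * width % 2 == 0 and height % 2 == 0, and needs at least 1920 bytes of
 * pitch on both planes (Y: 1920 * 1 cpp, interleaved CbCr: 960 * 2 cpp).
 * Standalone sketch with the NV12 parameters hard-coded:
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	const uint32_t width = 1920, height = 1080;
	const int hsub = 2, vsub = 2;		/* NV12: 2x2 chroma subsampling */
	const int cpp[2] = { 1, 2 };		/* Y plane, interleaved CbCr plane */
	int i;

	if (width % hsub || height % vsub) {
		printf("unaligned size for NV12\n");
		return (1);
	}
	for (i = 0; i < 2; i++) {
		uint32_t plane_width = width / (i != 0 ? hsub : 1);
		/* prints 1920 for plane 0 and 1920 for plane 1 */
		printf("plane %d: min pitch %u bytes\n", i, cpp[i] * plane_width);
	}
	return (0);
}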
*/ int drm_mode_rmfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_object *obj; struct drm_framebuffer *fb = NULL; struct drm_framebuffer *fbl = NULL; uint32_t *id = data; int ret = 0; int found = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); /* TODO check that we really get a framebuffer back. */ if (!obj) { ret = EINVAL; goto out; } fb = obj_to_fb(obj); list_for_each_entry(fbl, &file_priv->fbs, filp_head) if (fb == fbl) found = 1; if (!found) { ret = EINVAL; goto out; } /* TODO release all crtc connected to the framebuffer */ /* TODO unhock the destructor from the buffer object */ list_del(&fb->filp_head); fb->funcs->destroy(fb); out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getfb - get FB info * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * LOCKING: * Takes mode config lock. * * Lookup the FB given its ID and return info about it. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd *r = data; struct drm_mode_object *obj; struct drm_framebuffer *fb; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { ret = EINVAL; goto out; } fb = obj_to_fb(obj); r->height = fb->height; r->width = fb->width; r->depth = fb->depth; r->bpp = fb->bits_per_pixel; r->pitch = fb->pitches[0]; r->handle = 0; fb->funcs->create_handle(fb, file_priv, &r->handle); out: sx_xunlock(&dev->mode_config.mutex); return ret; } int drm_mode_dirtyfb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_clip_rect __user *clips_ptr; struct drm_clip_rect *clips = NULL; struct drm_mode_fb_dirty_cmd *r = data; struct drm_mode_object *obj; struct drm_framebuffer *fb; unsigned flags; int num_clips; - int ret = 0; + int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return (EINVAL); sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { ret = EINVAL; goto out_err1; } fb = obj_to_fb(obj); num_clips = r->num_clips; clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr; if (!num_clips != !clips_ptr) { ret = EINVAL; goto out_err1; } flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; /* If userspace annotates copy, clips must come in pairs */ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { ret = EINVAL; goto out_err1; } if (num_clips && clips_ptr) { if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) { ret = EINVAL; goto out_err1; } clips = malloc(num_clips * sizeof(*clips), DRM_MEM_KMS, M_WAITOK | M_ZERO); ret = copyin(clips_ptr, clips, num_clips * sizeof(*clips)); if (ret) goto out_err2; } if (fb->funcs->dirty) { ret = -fb->funcs->dirty(fb, file_priv, flags, r->color, clips, num_clips); } else { ret = ENOSYS; goto out_err2; } out_err2: free(clips, DRM_MEM_KMS); out_err1: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_fb_release - remove and free the FBs on this file * @filp: file * from the ioctl * * LOCKING: * Takes mode config lock. * * Destroy all the FBs associated with @filp. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
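/*
 * Editorial userspace sketch, not part of this change: for the DIRTYFB path
 * above, clip rectangles travel as an array of struct drm_clip_rect behind
 * clips_ptr, and with DRM_MODE_FB_DIRTY_ANNOTATE_COPY they must come in
 * src/dst pairs (hence the num_clips % 2 check).  Assumes the standard
 * drm_mode_fb_dirty_cmd and DRM_IOCTL_MODE_DIRTYFB definitions from the DRM
 * headers; error handling is omitted.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int
flush_damage(int fd, uint32_t fb_id, uint16_t x1, uint16_t y1,
    uint16_t x2, uint16_t y2)
{
	struct drm_clip_rect clip = { .x1 = x1, .y1 = y1, .x2 = x2, .y2 = y2 };
	struct drm_mode_fb_dirty_cmd cmd = {
		.fb_id = fb_id,
		.flags = 0,		/* plain damage, no copy annotation */
		.num_clips = 1,
		.clips_ptr = (uintptr_t)&clip,
	};

	return (ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd));
}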
*/ void drm_fb_release(struct drm_file *priv) { #if 1 struct drm_device *dev = priv->dev; #else struct drm_device *dev = priv->minor->dev; #endif struct drm_framebuffer *fb, *tfb; sx_xlock(&dev->mode_config.mutex); list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { list_del(&fb->filp_head); fb->funcs->destroy(fb); } sx_xunlock(&dev->mode_config.mutex); } /** * drm_mode_attachmode - add a mode to the user mode list * @dev: DRM device * @connector: connector to add the mode to * @mode: mode to add * * Add @mode to @connector's user mode list. */ static void drm_mode_attachmode(struct drm_device *dev, struct drm_connector *connector, struct drm_display_mode *mode) { list_add_tail(&mode->head, &connector->user_modes); } int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_display_mode *mode) { struct drm_connector *connector; int ret = 0; struct drm_display_mode *dup_mode, *next; DRM_LIST_HEAD(list); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (!connector->encoder) continue; if (connector->encoder->crtc == crtc) { dup_mode = drm_mode_duplicate(dev, mode); if (!dup_mode) { ret = ENOMEM; goto out; } list_add_tail(&dup_mode->head, &list); } } list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (!connector->encoder) continue; if (connector->encoder->crtc == crtc) list_move_tail(list.next, &connector->user_modes); } MPASS(!list_empty(&list)); out: list_for_each_entry_safe(dup_mode, next, &list, head) drm_mode_destroy(dev, dup_mode); return ret; } static int drm_mode_detachmode(struct drm_device *dev, struct drm_connector *connector, struct drm_display_mode *mode) { int found = 0; int ret = 0; struct drm_display_mode *match_mode, *t; list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) { if (drm_mode_equal(match_mode, mode)) { list_del(&match_mode->head); drm_mode_destroy(dev, match_mode); found = 1; break; } } if (!found) ret = -EINVAL; return ret; } int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode) { struct drm_connector *connector; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_mode_detachmode(dev, connector, mode); } return 0; } /** * drm_fb_attachmode - Attach a user mode to an connector * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * This attaches a user specified mode to an connector. * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_attachmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_mode_cmd *mode_cmd = data; struct drm_connector *connector; struct drm_display_mode *mode; struct drm_mode_object *obj; struct drm_mode_modeinfo *umode = &mode_cmd->mode; - int ret = 0; + int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { ret = -EINVAL; goto out; } connector = obj_to_connector(obj); mode = drm_mode_create(dev); if (!mode) { ret = -ENOMEM; goto out; } ret = drm_crtc_convert_umode(mode, umode); if (ret) { DRM_DEBUG_KMS("Invalid mode\n"); drm_mode_destroy(dev, mode); goto out; } drm_mode_attachmode(dev, connector, mode); out: sx_xunlock(&dev->mode_config.mutex); return ret; } /** * drm_fb_detachmode - Detach a user specified mode from an connector * @inode: inode from the ioctl * @filp: file * from the ioctl * @cmd: cmd from ioctl * @arg: arg from ioctl * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_detachmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_object *obj; struct drm_mode_mode_cmd *mode_cmd = data; struct drm_connector *connector; struct drm_display_mode mode; struct drm_mode_modeinfo *umode = &mode_cmd->mode; - int ret = 0; + int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); if (!obj) { ret = -EINVAL; goto out; } connector = obj_to_connector(obj); ret = drm_crtc_convert_umode(&mode, umode); if (ret) { DRM_DEBUG_KMS("Invalid mode\n"); goto out; } ret = drm_mode_detachmode(dev, connector, &mode); out: sx_xunlock(&dev->mode_config.mutex); return ret; } struct drm_property *drm_property_create(struct drm_device *dev, int flags, const char *name, int num_values) { struct drm_property *property = NULL; int ret; property = malloc(sizeof(struct drm_property), DRM_MEM_KMS, M_WAITOK | M_ZERO); if (num_values) { property->values = malloc(sizeof(uint64_t)*num_values, DRM_MEM_KMS, M_WAITOK | M_ZERO); } ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); if (ret) goto fail; property->flags = flags; property->num_values = num_values; INIT_LIST_HEAD(&property->enum_blob_list); if (name) { strncpy(property->name, name, DRM_PROP_NAME_LEN); property->name[DRM_PROP_NAME_LEN-1] = '\0'; } list_add_tail(&property->head, &dev->mode_config.property_list); return property; fail: free(property->values, DRM_MEM_KMS); free(property, DRM_MEM_KMS); return (NULL); } struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, const char *name, const struct drm_prop_enum_list *props, int num_values) { struct drm_property *property; int i, ret; flags |= DRM_MODE_PROP_ENUM; property = drm_property_create(dev, flags, name, num_values); if (!property) return NULL; for (i = 0; i < num_values; i++) { ret = drm_property_add_enum(property, i, props[i].type, props[i].name); if (ret) { drm_property_destroy(dev, property); return NULL; } } return property; } +struct drm_property *drm_property_create_bitmask(struct drm_device *dev, + int flags, const char *name, + const struct drm_prop_enum_list *props, + int num_values) +{ + struct drm_property *property; + int i, ret; + + flags |= DRM_MODE_PROP_BITMASK; + + property = drm_property_create(dev, flags, name, 
num_values); + if (!property) + return NULL; + + for (i = 0; i < num_values; i++) { + ret = drm_property_add_enum(property, i, + props[i].type, + props[i].name); + if (ret) { + drm_property_destroy(dev, property); + return NULL; + } + } + + return property; +} + struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, const char *name, uint64_t min, uint64_t max) { struct drm_property *property; flags |= DRM_MODE_PROP_RANGE; property = drm_property_create(dev, flags, name, 2); if (!property) return NULL; property->values[0] = min; property->values[1] = max; return property; } int drm_property_add_enum(struct drm_property *property, int index, uint64_t value, const char *name) { struct drm_property_enum *prop_enum; - if (!(property->flags & DRM_MODE_PROP_ENUM)) + if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK))) return -EINVAL; + /* + * Bitmask enum properties have the additional constraint of values + * from 0 to 63 + */ + if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63)) + return -EINVAL; + if (!list_empty(&property->enum_blob_list)) { list_for_each_entry(prop_enum, &property->enum_blob_list, head) { if (prop_enum->value == value) { strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; return 0; } } } prop_enum = malloc(sizeof(struct drm_property_enum), DRM_MEM_KMS, M_WAITOK | M_ZERO); strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; prop_enum->value = value; property->values[index] = value; list_add_tail(&prop_enum->head, &property->enum_blob_list); return 0; } void drm_property_destroy(struct drm_device *dev, struct drm_property *property) { struct drm_property_enum *prop_enum, *pt; list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) { list_del(&prop_enum->head); free(prop_enum, DRM_MEM_KMS); } if (property->num_values) free(property->values, DRM_MEM_KMS); drm_mode_object_put(dev, &property->base); list_del(&property->head); free(property, DRM_MEM_KMS); } -int drm_connector_attach_property(struct drm_connector *connector, +void drm_connector_attach_property(struct drm_connector *connector, struct drm_property *property, uint64_t init_val) { - int i; - - for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { - if (connector->property_ids[i] == 0) { - connector->property_ids[i] = property->base.id; - connector->property_values[i] = init_val; - break; - } - } - - if (i == DRM_CONNECTOR_MAX_PROPERTY) - return -EINVAL; - return 0; + drm_object_attach_property(&connector->base, property, init_val); } int drm_connector_property_set_value(struct drm_connector *connector, struct drm_property *property, uint64_t value) { + return drm_object_property_set_value(&connector->base, property, value); +} + +int drm_connector_property_get_value(struct drm_connector *connector, + struct drm_property *property, uint64_t *val) +{ + return drm_object_property_get_value(&connector->base, property, val); +} + +void drm_object_attach_property(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t init_val) +{ + int count = obj->properties->count; + + if (count == DRM_OBJECT_MAX_PROPERTY) { + printf("Failed to attach object property (type: 0x%x). 
Please " + "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time " + "you see this message on the same object type.\n", + obj->type); + return; + } + + obj->properties->ids[count] = property->base.id; + obj->properties->values[count] = init_val; + obj->properties->count++; +} + +int drm_object_property_set_value(struct drm_mode_object *obj, + struct drm_property *property, uint64_t val) +{ int i; - for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { - if (connector->property_ids[i] == property->base.id) { - connector->property_values[i] = value; - break; + for (i = 0; i < obj->properties->count; i++) { + if (obj->properties->ids[i] == property->base.id) { + obj->properties->values[i] = val; + return 0; } } - if (i == DRM_CONNECTOR_MAX_PROPERTY) - return -EINVAL; - return 0; + return -EINVAL; } -int drm_connector_property_get_value(struct drm_connector *connector, +int drm_object_property_get_value(struct drm_mode_object *obj, struct drm_property *property, uint64_t *val) { int i; - for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { - if (connector->property_ids[i] == property->base.id) { - *val = connector->property_values[i]; - break; + for (i = 0; i < obj->properties->count; i++) { + if (obj->properties->ids[i] == property->base.id) { + *val = obj->properties->values[i]; + return 0; } } - if (i == DRM_CONNECTOR_MAX_PROPERTY) - return -EINVAL; - return 0; + return -EINVAL; } int drm_mode_getproperty_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_object *obj; struct drm_mode_get_property *out_resp = data; struct drm_property *property; int enum_count = 0; int blob_count = 0; int value_count = 0; int ret = 0, i; int copied; struct drm_property_enum *prop_enum; struct drm_mode_property_enum __user *enum_ptr; struct drm_property_blob *prop_blob; uint32_t *blob_id_ptr; uint64_t *values_ptr; uint32_t *blob_length_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); if (!obj) { ret = -EINVAL; goto done; } property = obj_to_property(obj); - if (property->flags & DRM_MODE_PROP_ENUM) { + if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) { list_for_each_entry(prop_enum, &property->enum_blob_list, head) enum_count++; } else if (property->flags & DRM_MODE_PROP_BLOB) { list_for_each_entry(prop_blob, &property->enum_blob_list, head) blob_count++; } value_count = property->num_values; strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN); out_resp->name[DRM_PROP_NAME_LEN-1] = 0; out_resp->flags = property->flags; if ((out_resp->count_values >= value_count) && value_count) { values_ptr = (uint64_t *)(uintptr_t)out_resp->values_ptr; for (i = 0; i < value_count; i++) { if (copyout(&property->values[i], values_ptr + i, sizeof(uint64_t))) { ret = -EFAULT; goto done; } } } out_resp->count_values = value_count; - if (property->flags & DRM_MODE_PROP_ENUM) { + if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) { if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { copied = 0; enum_ptr = (struct drm_mode_property_enum *)(uintptr_t)out_resp->enum_blob_ptr; list_for_each_entry(prop_enum, &property->enum_blob_list, head) { if (copyout(&prop_enum->value, &enum_ptr[copied].value, sizeof(uint64_t))) { ret = -EFAULT; goto done; } if (copyout(&prop_enum->name, &enum_ptr[copied].name,DRM_PROP_NAME_LEN)) { ret = -EFAULT; goto done; } copied++; } } out_resp->count_enum_blobs = enum_count; } if 
(property->flags & DRM_MODE_PROP_BLOB) { if ((out_resp->count_enum_blobs >= blob_count) && blob_count) { copied = 0; blob_id_ptr = (uint32_t *)(uintptr_t)out_resp->enum_blob_ptr; blob_length_ptr = (uint32_t *)(uintptr_t)out_resp->values_ptr; list_for_each_entry(prop_blob, &property->enum_blob_list, head) { if (copyout(&prop_blob->base.id, blob_id_ptr + copied, sizeof(uint32_t))) { ret = -EFAULT; goto done; } if (copyout(&prop_blob->length, blob_length_ptr + copied, sizeof(uint32_t))) { ret = -EFAULT; goto done; } copied++; } } out_resp->count_enum_blobs = blob_count; } done: sx_xunlock(&dev->mode_config.mutex); return ret; } static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length, void *data) { struct drm_property_blob *blob; int ret; if (!length || !data) return NULL; blob = malloc(sizeof(struct drm_property_blob) + length, DRM_MEM_KMS, M_WAITOK | M_ZERO); ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB); if (ret) { free(blob, DRM_MEM_KMS); return (NULL); } blob->length = length; memcpy(blob->data, data, length); list_add_tail(&blob->head, &dev->mode_config.property_blob_list); return blob; } static void drm_property_destroy_blob(struct drm_device *dev, struct drm_property_blob *blob) { drm_mode_object_put(dev, &blob->base); list_del(&blob->head); free(blob, DRM_MEM_KMS); } int drm_mode_getblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_object *obj; struct drm_mode_get_blob *out_resp = data; struct drm_property_blob *blob; int ret = 0; void *blob_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); if (!obj) { ret = -EINVAL; goto done; } blob = obj_to_blob(obj); if (out_resp->length == blob->length) { blob_ptr = (void *)(unsigned long)out_resp->data; if (copyout(blob->data, blob_ptr, blob->length)){ ret = -EFAULT; goto done; } } out_resp->length = blob->length; done: sx_xunlock(&dev->mode_config.mutex); return ret; } int drm_mode_connector_update_edid_property(struct drm_connector *connector, struct edid *edid) { struct drm_device *dev = connector->dev; - int ret = 0, size; + int ret, size; if (connector->edid_blob_ptr) drm_property_destroy_blob(dev, connector->edid_blob_ptr); /* Delete edid, when there is none. 
*/ if (!edid) { connector->edid_blob_ptr = NULL; ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0); return ret; } size = EDID_LENGTH * (1 + edid->extensions); connector->edid_blob_ptr = drm_property_create_blob(connector->dev, size, edid); ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, connector->edid_blob_ptr->base.id); return ret; } +static bool drm_property_change_is_valid(struct drm_property *property, + u64 value) +{ + if (property->flags & DRM_MODE_PROP_IMMUTABLE) + return false; + if (property->flags & DRM_MODE_PROP_RANGE) { + if (value < property->values[0] || value > property->values[1]) + return false; + return true; + } else if (property->flags & DRM_MODE_PROP_BITMASK) { + int i; + u64 valid_mask = 0; + for (i = 0; i < property->num_values; i++) + valid_mask |= (1ULL << property->values[i]); + return !(value & ~valid_mask); + } else { + int i; + for (i = 0; i < property->num_values; i++) + if (property->values[i] == value) + return true; + return false; + } +} + int drm_mode_connector_property_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_mode_connector_set_property *out_resp = data; - struct drm_mode_object *obj; - struct drm_property *property; - struct drm_connector *connector; + struct drm_mode_connector_set_property *conn_set_prop = data; + struct drm_mode_obj_set_property obj_set_prop = { + .value = conn_set_prop->value, + .prop_id = conn_set_prop->prop_id, + .obj_id = conn_set_prop->connector_id, + .obj_type = DRM_MODE_OBJECT_CONNECTOR + }; + + /* It does all the locking and checking we need */ + return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv); +} + +static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value) +{ int ret = -EINVAL; + struct drm_connector *connector = obj_to_connector(obj); + + /* Do DPMS ourselves */ + if (property == connector->dev->mode_config.dpms_property) { + if (connector->funcs->dpms) + (*connector->funcs->dpms)(connector, (int)value); + ret = 0; + } else if (connector->funcs->set_property) + ret = connector->funcs->set_property(connector, property, value); + + /* store the property value if successful */ + if (!ret) + drm_connector_property_set_value(connector, property, value); + return ret; +} + +static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value) +{ + int ret = -EINVAL; + struct drm_crtc *crtc = obj_to_crtc(obj); + + if (crtc->funcs->set_property) + ret = crtc->funcs->set_property(crtc, property, value); + if (!ret) + drm_object_property_set_value(obj, property, value); + + return ret; +} + +static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value) +{ + int ret = -EINVAL; + struct drm_plane *plane = obj_to_plane(obj); + + if (plane->funcs->set_property) + ret = plane->funcs->set_property(plane, property, value); + if (!ret) + drm_object_property_set_value(obj, property, value); + + return ret; +} + +int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_obj_get_properties *arg = data; + struct drm_mode_object *obj; + int ret = 0; int i; + int copied = 0; + int props_count = 0; + uint32_t __user *props_ptr; + uint64_t __user *prop_values_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; sx_xlock(&dev->mode_config.mutex); - obj = 
drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); + obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); if (!obj) { + ret = -EINVAL; goto out; } - connector = obj_to_connector(obj); + if (!obj->properties) { + ret = -EINVAL; + goto out; + } - for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { - if (connector->property_ids[i] == out_resp->prop_id) - break; + props_count = obj->properties->count; + + /* This ioctl is called twice, once to determine how much space is + * needed, and the 2nd time to fill it. */ + if ((arg->count_props >= props_count) && props_count) { + copied = 0; + props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); + prop_values_ptr = (uint64_t __user *)(unsigned long) + (arg->prop_values_ptr); + for (i = 0; i < props_count; i++) { + if (copyout(props_ptr + copied, + &obj->properties->ids[i], sizeof(uint32_t))) { + ret = -EFAULT; + goto out; + } + if (copyout(prop_values_ptr + copied, + &obj->properties->values[i], sizeof(uint64_t))) { + ret = -EFAULT; + goto out; + } + copied++; + } } + arg->count_props = props_count; +out: + sx_xunlock(&dev->mode_config.mutex); + return ret; +} - if (i == DRM_CONNECTOR_MAX_PROPERTY) { +int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_obj_set_property *arg = data; + struct drm_mode_object *arg_obj; + struct drm_mode_object *prop_obj; + struct drm_property *property; + int ret = -EINVAL; + int i; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + sx_xlock(&dev->mode_config.mutex); + + arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); + if (!arg_obj) goto out; - } + if (!arg_obj->properties) + goto out; - obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); - if (!obj) { + for (i = 0; i < arg_obj->properties->count; i++) + if (arg_obj->properties->ids[i] == arg->prop_id) + break; + + if (i == arg_obj->properties->count) goto out; - } - property = obj_to_property(obj); - if (property->flags & DRM_MODE_PROP_IMMUTABLE) + prop_obj = drm_mode_object_find(dev, arg->prop_id, + DRM_MODE_OBJECT_PROPERTY); + if (!prop_obj) goto out; + property = obj_to_property(prop_obj); - if (property->flags & DRM_MODE_PROP_RANGE) { - if (out_resp->value < property->values[0]) - goto out; + if (!drm_property_change_is_valid(property, arg->value)) + goto out; - if (out_resp->value > property->values[1]) - goto out; - } else { - int found = 0; - for (i = 0; i < property->num_values; i++) { - if (property->values[i] == out_resp->value) { - found = 1; - break; - } - } - if (!found) { - goto out; - } + switch (arg_obj->type) { + case DRM_MODE_OBJECT_CONNECTOR: + ret = drm_mode_connector_set_obj_prop(arg_obj, property, + arg->value); + break; + case DRM_MODE_OBJECT_CRTC: + ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value); + break; + case DRM_MODE_OBJECT_PLANE: + ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value); + break; } - /* Do DPMS ourselves */ - if (property == connector->dev->mode_config.dpms_property) { - if (connector->funcs->dpms) - (*connector->funcs->dpms)(connector, (int) out_resp->value); - ret = 0; - } else if (connector->funcs->set_property) - ret = connector->funcs->set_property(connector, property, out_resp->value); - - /* store the property value if successful */ - if (!ret) - drm_connector_property_set_value(connector, property, out_resp->value); out: sx_xunlock(&dev->mode_config.mutex); return ret; } int 
drm_mode_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder) { int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) { connector->encoder_ids[i] = encoder->base.id; return 0; } } return -ENOMEM; } void drm_mode_connector_detach_encoder(struct drm_connector *connector, struct drm_encoder *encoder) { int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == encoder->base.id) { connector->encoder_ids[i] = 0; if (connector->encoder == encoder) connector->encoder = NULL; break; } } } int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size) { crtc->gamma_size = gamma_size; crtc->gamma_store = malloc(gamma_size * sizeof(uint16_t) * 3, DRM_MEM_KMS, M_WAITOK | M_ZERO); return 0; } int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_lut *crtc_lut = data; struct drm_mode_object *obj; struct drm_crtc *crtc; void *r_base, *g_base, *b_base; int size; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); /* memcpy into gamma store */ if (crtc_lut->gamma_size != crtc->gamma_size) { ret = -EINVAL; goto out; } size = crtc_lut->gamma_size * (sizeof(uint16_t)); r_base = crtc->gamma_store; if (copyin((void *)(uintptr_t)crtc_lut->red, r_base, size)) { ret = -EFAULT; goto out; } g_base = (char *)r_base + size; if (copyin((void *)(uintptr_t)crtc_lut->green, g_base, size)) { ret = -EFAULT; goto out; } b_base = (char *)g_base + size; if (copyin((void *)(uintptr_t)crtc_lut->blue, b_base, size)) { ret = -EFAULT; goto out; } crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); out: sx_xunlock(&dev->mode_config.mutex); return ret; } int drm_mode_gamma_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_lut *crtc_lut = data; struct drm_mode_object *obj; struct drm_crtc *crtc; void *r_base, *g_base, *b_base; int size; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); + if (crtc->funcs->gamma_set == NULL) { + ret = -ENOSYS; + goto out; + } + /* memcpy into gamma store */ if (crtc_lut->gamma_size != crtc->gamma_size) { ret = -EINVAL; goto out; } size = crtc_lut->gamma_size * (sizeof(uint16_t)); r_base = crtc->gamma_store; if (copyout(r_base, (void *)(uintptr_t)crtc_lut->red, size)) { ret = -EFAULT; goto out; } g_base = (char *)r_base + size; if (copyout(g_base, (void *)(uintptr_t)crtc_lut->green, size)) { ret = -EFAULT; goto out; } b_base = (char *)g_base + size; if (copyout(b_base, (void *)(uintptr_t)crtc_lut->blue, size)) { ret = -EFAULT; goto out; } out: sx_xunlock(&dev->mode_config.mutex); return ret; } static void drm_kms_free(void *arg) { free(arg, DRM_MEM_KMS); } int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_page_flip *page_flip = data; struct drm_mode_object *obj; struct drm_crtc *crtc; struct drm_framebuffer *fb; struct drm_pending_vblank_event *e = NULL; int ret = EINVAL; if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || page_flip->reserved != 0) return (EINVAL); sx_xlock(&dev->mode_config.mutex); obj = 
drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) goto out; crtc = obj_to_crtc(obj); if (crtc->fb == NULL) { /* The framebuffer is currently unbound, presumably * due to a hotplug event, that userspace has not * yet discovered. */ ret = EBUSY; goto out; } if (crtc->funcs->page_flip == NULL) goto out; obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB); if (!obj) goto out; fb = obj_to_fb(obj); if (crtc->mode.hdisplay > fb->width || crtc->mode.vdisplay > fb->height || crtc->x > fb->width - crtc->mode.hdisplay || crtc->y > fb->height - crtc->mode.vdisplay) { DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n", fb->width, fb->height, crtc->mode.hdisplay, crtc->mode.vdisplay, crtc->x, crtc->y); ret = ENOSPC; goto out; } if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { ret = ENOMEM; mtx_lock(&dev->event_lock); if (file_priv->event_space < sizeof e->event) { mtx_unlock(&dev->event_lock); goto out; } file_priv->event_space -= sizeof e->event; mtx_unlock(&dev->event_lock); e = malloc(sizeof *e, DRM_MEM_KMS, M_WAITOK | M_ZERO); e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof e->event; e->event.user_data = page_flip->user_data; e->base.event = &e->event.base; e->base.file_priv = file_priv; e->base.destroy = (void (*) (struct drm_pending_event *))drm_kms_free; } ret = -crtc->funcs->page_flip(crtc, fb, e); if (ret != 0) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { mtx_lock(&dev->event_lock); file_priv->event_space += sizeof e->event; mtx_unlock(&dev->event_lock); free(e, DRM_MEM_KMS); } } out: sx_xunlock(&dev->mode_config.mutex); CTR3(KTR_DRM, "page_flip_ioctl %d %d %d", curproc->p_pid, page_flip->crtc_id, ret); return (ret); } void drm_mode_config_reset(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) if (crtc->funcs->reset) crtc->funcs->reset(crtc); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) if (encoder->funcs->reset) encoder->funcs->reset(encoder); list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->funcs->reset) connector->funcs->reset(connector); } int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_create_dumb *args = data; if (!dev->driver->dumb_create) return -ENOTSUP; return dev->driver->dumb_create(file_priv, dev, args); } int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_map_dumb *args = data; /* call driver ioctl to get mmap offset */ if (!dev->driver->dumb_map_offset) return -ENOTSUP; return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset); } int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_destroy_dumb *args = data; if (!dev->driver->dumb_destroy) return -ENOTSUP; return dev->driver->dumb_destroy(file_priv, dev, args->handle); } /* * Just need to support RGB formats here for compat with code that doesn't * use pixel formats directly yet. 
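/*
 * Editorial userspace sketch, not part of this change: with
 * DRM_MODE_PAGE_FLIP_EVENT set, the page-flip ioctl above queues a
 * DRM_EVENT_FLIP_COMPLETE event that is read() back on the same fd once the
 * flip has completed.  Assumes the standard drm_mode_crtc_page_flip and
 * drm_event_vblank layouts from the DRM headers, and that no other events
 * are queued; error and partial-read handling are omitted.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int
flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	struct drm_mode_crtc_page_flip flip = {
		.crtc_id = crtc_id,
		.fb_id = fb_id,
		.flags = DRM_MODE_PAGE_FLIP_EVENT,
		.user_data = 0xc0ffee,		/* echoed back in the event */
	};
	char buf[128];
	struct drm_event_vblank ev;

	if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) == -1)
		return (-1);

	/* Block until the flip-complete event arrives on the DRM fd. */
	if (read(fd, buf, sizeof(buf)) < (ssize_t)sizeof(ev))
		return (-1);
	memcpy(&ev, buf, sizeof(ev));
	return (ev.base.type == DRM_EVENT_FLIP_COMPLETE ? 0 : -1);
}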
*/ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp) { switch (format) { case DRM_FORMAT_RGB332: case DRM_FORMAT_BGR233: *depth = 8; *bpp = 8; break; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_XBGR1555: case DRM_FORMAT_RGBX5551: case DRM_FORMAT_BGRX5551: case DRM_FORMAT_ARGB1555: case DRM_FORMAT_ABGR1555: case DRM_FORMAT_RGBA5551: case DRM_FORMAT_BGRA5551: *depth = 15; *bpp = 16; break; case DRM_FORMAT_RGB565: case DRM_FORMAT_BGR565: *depth = 16; *bpp = 16; break; case DRM_FORMAT_RGB888: case DRM_FORMAT_BGR888: *depth = 24; *bpp = 24; break; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_RGBX8888: case DRM_FORMAT_BGRX8888: *depth = 24; *bpp = 32; break; case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_RGBX1010102: case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_RGBA1010102: case DRM_FORMAT_BGRA1010102: *depth = 30; *bpp = 32; break; case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_RGBA8888: case DRM_FORMAT_BGRA8888: *depth = 32; *bpp = 32; break; default: DRM_DEBUG_KMS("unsupported pixel format\n"); *depth = 0; *bpp = 0; break; + } +} + +/** + * drm_format_num_planes - get the number of planes for format + * @format: pixel format (DRM_FORMAT_*) + * + * RETURNS: + * The number of planes used by the specified pixel format. + */ +int drm_format_num_planes(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 3; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + return 2; + default: + return 1; + } +} + +/** + * drm_format_plane_cpp - determine the bytes per pixel value + * @format: pixel format (DRM_FORMAT_*) + * @plane: plane index + * + * RETURNS: + * The bytes per pixel value for the specified plane. + */ +int drm_format_plane_cpp(uint32_t format, int plane) +{ + unsigned int depth; + int bpp; + + if (plane >= drm_format_num_planes(format)) + return 0; + + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + return 2; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + return plane ? 2 : 1; + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 1; + default: + drm_fb_get_bpp_depth(format, &depth, &bpp); + return bpp >> 3; + } +} + +/** + * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * RETURNS: + * The horizontal chroma subsampling factor for the + * specified pixel format. 
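/*
 * Editorial worked example, not part of this change, tying together the new
 * format helpers (drm_format_num_planes and drm_format_plane_cpp above, the
 * chroma subsampling helpers that follow): for DRM_FORMAT_YUV420 they
 * report 3 planes, 1 byte per sample on every plane, and 2x2 subsampling,
 * so a tightly packed 1920x1080 buffer takes
 * 1920*1080 + 2 * (960*540) = 3110400 bytes.  Standalone sketch with those
 * YUV420 parameters written out by hand:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint32_t width = 1920, height = 1080;
	const int num_planes = 3, cpp = 1, hsub = 2, vsub = 2;	/* YUV420 */
	size_t total = 0;
	int i;

	for (i = 0; i < num_planes; i++) {
		uint32_t w = width / (i != 0 ? hsub : 1);
		uint32_t h = height / (i != 0 ? vsub : 1);
		total += (size_t)cpp * w * h;
	}
	printf("YUV420 %ux%u needs %zu bytes\n", width, height, total);
	return (0);
}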
+ */ +int drm_format_horz_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + return 2; + default: + return 1; + } +} + +/** + * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * RETURNS: + * The vertical chroma subsampling factor for the + * specified pixel format. + */ +int drm_format_vert_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return 2; + default: + return 1; } } Index: head/sys/dev/drm2/drm_crtc.h =================================================================== --- head/sys/dev/drm2/drm_crtc.h (revision 277486) +++ head/sys/dev/drm2/drm_crtc.h (revision 277487) @@ -1,935 +1,976 @@ /* * Copyright © 2006 Keith Packard * Copyright © 2007-2008 Dave Airlie * Copyright © 2007-2008 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * $FreeBSD$ */ #ifndef __DRM_CRTC_H__ #define __DRM_CRTC_H__ #include #include struct drm_device; struct drm_mode_set; struct drm_framebuffer; struct i2c_adapter; #define DRM_MODE_OBJECT_CRTC 0xcccccccc #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 #define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 #define DRM_MODE_OBJECT_MODE 0xdededede #define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 #define DRM_MODE_OBJECT_FB 0xfbfbfbfb #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb #define DRM_MODE_OBJECT_PLANE 0xeeeeeeee struct drm_mode_object { uint32_t id; uint32_t type; + struct drm_object_properties *properties; }; +#define DRM_OBJECT_MAX_PROPERTY 16 +struct drm_object_properties { + int count; + uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; + uint64_t values[DRM_OBJECT_MAX_PROPERTY]; +}; + /* * Note on terminology: here, for brevity and convenience, we refer to connector * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, * DVI, etc. 
And 'screen' refers to the whole of the visible display, which * may span multiple monitors (and therefore multiple CRTC and connector * structures). */ enum drm_mode_status { MODE_OK = 0, /* Mode OK */ MODE_HSYNC, /* hsync out of range */ MODE_VSYNC, /* vsync out of range */ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ MODE_BAD_WIDTH, /* requires an unsupported linepitch */ MODE_NOMODE, /* no mode with a maching name */ MODE_NO_INTERLACE, /* interlaced mode not supported */ MODE_NO_DBLESCAN, /* doublescan mode not supported */ MODE_NO_VSCAN, /* multiscan mode not supported */ MODE_MEM, /* insufficient video memory */ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ MODE_MEM_VIRT, /* insufficient video memory given virtual size */ MODE_NOCLOCK, /* no fixed clock available */ MODE_CLOCK_HIGH, /* clock required is too high */ MODE_CLOCK_LOW, /* clock required is too low */ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ MODE_BAD_HVALUE, /* horizontal timing was out of range */ MODE_BAD_VVALUE, /* vertical timing was out of range */ MODE_BAD_VSCAN, /* VScan value out of range */ MODE_HSYNC_NARROW, /* horizontal sync too narrow */ MODE_HSYNC_WIDE, /* horizontal sync too wide */ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ MODE_HBLANK_WIDE, /* horizontal blanking too wide */ MODE_VSYNC_NARROW, /* vertical sync too narrow */ MODE_VSYNC_WIDE, /* vertical sync too wide */ MODE_VBLANK_NARROW, /* vertical blanking too narrow */ MODE_VBLANK_WIDE, /* vertical blanking too wide */ MODE_PANEL, /* exceeds panel dimensions */ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ MODE_ONE_WIDTH, /* only one width is supported */ MODE_ONE_HEIGHT, /* only one height is supported */ MODE_ONE_SIZE, /* only one resolution is supported */ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ MODE_UNVERIFIED = -3, /* mode needs to reverified */ MODE_BAD = -2, /* unspecified reason */ MODE_ERROR = -1 /* error condition */ }; #define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ DRM_MODE_TYPE_CRTC_C) #define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ .name = nm, .status = 0, .type = (t), .clock = (c), \ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ .vscan = (vs), .flags = (f), .vrefresh = 0 #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ struct drm_display_mode { /* Header */ struct list_head head; struct drm_mode_object base; char name[DRM_DISPLAY_MODE_LEN]; int connector_count; enum drm_mode_status status; int type; /* Proposed mode values */ int clock; /* in kHz */ int hdisplay; int hsync_start; int hsync_end; int htotal; int hskew; int vdisplay; int vsync_start; int vsync_end; int vtotal; int vscan; unsigned int flags; /* Addressable image size (may be 0 for projectors, etc.) 
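 *
 * (Illustrative only: when width_mm is non-zero, a rough horizontal DPI
 * estimate for the mode is hdisplay * 25.4 / width_mm.)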
*/ int width_mm; int height_mm; /* Actual mode we give to hw */ int clock_index; int synth_clock; int crtc_hdisplay; int crtc_hblank_start; int crtc_hblank_end; int crtc_hsync_start; int crtc_hsync_end; int crtc_htotal; int crtc_hskew; int crtc_vdisplay; int crtc_vblank_start; int crtc_vblank_end; int crtc_vsync_start; int crtc_vsync_end; int crtc_vtotal; int crtc_hadjusted; int crtc_vadjusted; /* Driver private mode info */ int private_size; int *private; int private_flags; int vrefresh; /* in Hz */ int hsync; /* in kHz */ }; enum drm_connector_status { connector_status_connected = 1, connector_status_disconnected = 2, connector_status_unknown = 3, }; enum subpixel_order { SubPixelUnknown = 0, SubPixelHorizontalRGB, SubPixelHorizontalBGR, SubPixelVerticalRGB, SubPixelVerticalBGR, SubPixelNone, }; #define DRM_COLOR_FORMAT_RGB444 (1<<0) #define DRM_COLOR_FORMAT_YCRCB444 (1<<1) #define DRM_COLOR_FORMAT_YCRCB422 (1<<2) /* * Describes a given display (e.g. CRT or flat panel) and its limitations. */ struct drm_display_info { char name[DRM_DISPLAY_INFO_LEN]; /* Physical size */ unsigned int width_mm; unsigned int height_mm; /* Clock limits FIXME: storage format */ unsigned int min_vfreq, max_vfreq; unsigned int min_hfreq, max_hfreq; unsigned int pixel_clock; unsigned int bpc; enum subpixel_order subpixel_order; u32 color_formats; u8 cea_rev; char *raw_edid; /* if any */ }; struct drm_framebuffer_funcs { void (*destroy)(struct drm_framebuffer *framebuffer); int (*create_handle)(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle); /** * Optinal callback for the dirty fb ioctl. * * Userspace can notify the driver via this callback * that a area of the framebuffer has changed and should * be flushed to the display hardware. * * See documentation in drm_mode.h for the struct * drm_mode_fb_dirty_cmd for more information as all * the semantics and arguments have a one to one mapping * on this function. */ int (*dirty)(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips); }; struct drm_framebuffer { struct drm_device *dev; struct list_head head; struct drm_mode_object base; const struct drm_framebuffer_funcs *funcs; unsigned int pitches[4]; unsigned int offsets[4]; unsigned int width; unsigned int height; /* depth can be 15 or 16 */ unsigned int depth; int bits_per_pixel; int flags; uint32_t pixel_format; /* fourcc format */ struct list_head filp_head; /* if you are using the helper */ void *helper_private; }; struct drm_property_blob { struct drm_mode_object base; struct list_head head; unsigned int length; unsigned char data[]; }; struct drm_property_enum { uint64_t value; struct list_head head; char name[DRM_PROP_NAME_LEN]; }; struct drm_property { struct list_head head; struct drm_mode_object base; uint32_t flags; char name[DRM_PROP_NAME_LEN]; uint32_t num_values; uint64_t *values; struct list_head enum_blob_list; }; struct drm_crtc; struct drm_connector; struct drm_encoder; struct drm_pending_vblank_event; struct drm_plane; /** * drm_crtc_funcs - control CRTCs for a given device * @reset: reset CRTC after state has been invalidate (e.g. 
resume) * @dpms: control display power levels * @save: save CRTC state * @resore: restore CRTC state * @lock: lock the CRTC * @unlock: unlock the CRTC * @shadow_allocate: allocate shadow pixmap * @shadow_create: create shadow pixmap for rotation support * @shadow_destroy: free shadow pixmap * @mode_fixup: fixup proposed mode * @mode_set: set the desired mode on the CRTC * @gamma_set: specify color ramp for CRTC - * @destroy: deinit and free object. + * @destroy: deinit and free object + * @set_property: called when a property is changed * * The drm_crtc_funcs structure is the central CRTC management structure * in the DRM. Each CRTC controls one or more connectors (note that the name * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc. * connectors, not just CRTs). * * Each driver is responsible for filling out this structure at startup time, * in addition to providing other modesetting features, like i2c and DDC * bus accessors. */ struct drm_crtc_funcs { /* Save CRTC state */ void (*save)(struct drm_crtc *crtc); /* suspend? */ /* Restore CRTC state */ void (*restore)(struct drm_crtc *crtc); /* resume? */ /* Reset CRTC state */ void (*reset)(struct drm_crtc *crtc); /* cursor controls */ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height); int (*cursor_move)(struct drm_crtc *crtc, int x, int y); /* Set gamma on the CRTC */ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size); /* Object destroy routine */ void (*destroy)(struct drm_crtc *crtc); int (*set_config)(struct drm_mode_set *set); /* * Flip to the given framebuffer. This implements the page * flip ioctl descibed in drm_mode.h, specifically, the * implementation must return immediately and block all * rendering to the current fb until the flip has completed. * If userspace set the event flag in the ioctl, the event * argument will point to an event to send back when the flip * completes, otherwise it will be NULL. */ int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event); + int (*set_property)(struct drm_crtc *crtc, + struct drm_property *property, uint64_t val); }; /** * drm_crtc - central CRTC control structure * @enabled: is this CRTC enabled? * @x: x position on screen * @y: y position on screen * @funcs: CRTC control functions + * @properties: property tracking for this CRTC * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. */ struct drm_crtc { struct drm_device *dev; struct list_head head; struct drm_mode_object base; /* framebuffer the connector is currently bound to */ struct drm_framebuffer *fb; bool enabled; /* Requested mode from modesetting. */ struct drm_display_mode mode; /* Programmed mode in hw, after adjustments for encoders, * crtc, panel scaling etc. Needed for timestamping etc. */ struct drm_display_mode hwmode; int x, y; const struct drm_crtc_funcs *funcs; /* CRTC gamma size for reporting to userspace */ uint32_t gamma_size; uint16_t *gamma_store; /* Constants needed for precise vblank and swap timestamping. 
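 *
 * Rough sketch of how they relate to the programmed mode (the actual
 * values are filled in by drm_calc_timestamping_constants()):
 *
 *	pixeldur_ns ~= 1000000 / hwmode.clock        (clock is in kHz)
 *	linedur_ns  ~= pixeldur_ns * hwmode.crtc_htotal
 *	framedur_ns ~= linedur_ns * hwmode.crtc_vtotal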
*/ int64_t framedur_ns, linedur_ns, pixeldur_ns; /* if you are using the helper */ void *helper_private; + + struct drm_object_properties properties; }; /** * drm_connector_funcs - control connectors on a given device * @dpms: set power state (see drm_crtc_funcs above) * @save: save connector state * @restore: restore connector state * @reset: reset connector after state has been invalidate (e.g. resume) * @mode_valid: is this mode valid on the given connector? * @mode_fixup: try to fixup proposed mode for this connector * @mode_set: set this mode * @detect: is this connector active? * @get_modes: get mode list for this connector * @set_property: property for this connector may need update * @destroy: make object go away * @force: notify the driver the connector is forced on * * Each CRTC may have one or more connectors attached to it. The functions * below allow the core DRM code to control connectors, enumerate available modes, * etc. */ struct drm_connector_funcs { void (*dpms)(struct drm_connector *connector, int mode); void (*save)(struct drm_connector *connector); void (*restore)(struct drm_connector *connector); void (*reset)(struct drm_connector *connector); /* Check to see if anything is attached to the connector. * @force is set to false whilst polling, true when checking the * connector due to user request. @force can be used by the driver * to avoid expensive, destructive operations during automated * probing. */ enum drm_connector_status (*detect)(struct drm_connector *connector, bool force); int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); int (*set_property)(struct drm_connector *connector, struct drm_property *property, uint64_t val); void (*destroy)(struct drm_connector *connector); void (*force)(struct drm_connector *connector); }; struct drm_encoder_funcs { void (*reset)(struct drm_encoder *encoder); void (*destroy)(struct drm_encoder *encoder); }; #define DRM_CONNECTOR_MAX_UMODES 16 -#define DRM_CONNECTOR_MAX_PROPERTY 16 #define DRM_CONNECTOR_LEN 32 #define DRM_CONNECTOR_MAX_ENCODER 2 /** * drm_encoder - central DRM encoder structure */ struct drm_encoder { struct drm_device *dev; struct list_head head; struct drm_mode_object base; int encoder_type; uint32_t possible_crtcs; uint32_t possible_clones; struct drm_crtc *crtc; const struct drm_encoder_funcs *funcs; void *helper_private; }; enum drm_connector_force { DRM_FORCE_UNSPECIFIED, DRM_FORCE_OFF, DRM_FORCE_ON, /* force on analog part normally */ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ }; /* should we poll this connector for connects and disconnects */ /* hot plug detectable */ #define DRM_CONNECTOR_POLL_HPD (1 << 0) /* poll for connections */ #define DRM_CONNECTOR_POLL_CONNECT (1 << 1) /* can cleanly poll for disconnections without flickering the screen */ /* DACs should rarely do this without a lot of testing */ #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) #define MAX_ELD_BYTES 128 /** * drm_connector - central DRM connector control structure * @crtc: CRTC this connector is currently connected to, NULL if none * @interlace_allowed: can this connector handle interlaced modes? * @doublescan_allowed: can this connector handle doublescan? * @available_modes: modes available on this connector (from get_modes() + user) * @initial_x: initial x position for this connector * @initial_y: initial y position for this connector * @status: connector connected? 
* @funcs: connector control functions * * Each connector may be connected to one or more CRTCs, or may be clonable by * another connector if they can share a CRTC. Each connector also has a specific * position in the broader display (referred to as a 'screen' though it could * span multiple monitors). */ struct drm_connector { struct drm_device *dev; /* struct device kdev; XXXKIB */ struct device_attribute *attr; struct list_head head; struct drm_mode_object base; int connector_type; int connector_type_id; bool interlace_allowed; bool doublescan_allowed; struct list_head modes; /* list of modes on this connector */ int initial_x, initial_y; enum drm_connector_status status; /* these are modes added by probing with DDC or the BIOS */ struct list_head probed_modes; struct drm_display_info display_info; const struct drm_connector_funcs *funcs; struct list_head user_modes; struct drm_property_blob *edid_blob_ptr; - u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; - uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; + struct drm_object_properties properties; uint8_t polled; /* DRM_CONNECTOR_POLL_* */ /* requested DPMS state */ int dpms; void *helper_private; /* forced on connector */ enum drm_connector_force force; uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; uint32_t force_encoder_id; struct drm_encoder *encoder; /* currently active encoder */ /* EDID bits */ uint8_t eld[MAX_ELD_BYTES]; bool dvi_dual; int max_tmds_clock; /* in MHz */ bool latency_present[2]; int video_latency[2]; /* [0]: progressive, [1]: interlaced */ int audio_latency[2]; int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ }; /** * drm_plane_funcs - driver plane control functions * @update_plane: update the plane configuration * @disable_plane: shut down the plane * @destroy: clean up plane resources + * @set_property: called when a property is changed */ struct drm_plane_funcs { int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h); int (*disable_plane)(struct drm_plane *plane); void (*destroy)(struct drm_plane *plane); + int (*set_property)(struct drm_plane *plane, + struct drm_property *property, uint64_t val); }; /** * drm_plane - central DRM plane control structure * @dev: DRM device this plane belongs to * @head: for list management * @base: base mode object * @possible_crtcs: pipes this plane can be bound to * @format_types: array of formats supported by this plane * @format_count: number of formats supported * @crtc: currently bound CRTC * @fb: currently bound fb * @gamma_size: size of gamma table * @gamma_store: gamma correction table * @enabled: enabled flag * @funcs: helper functions * @helper_private: storage for drver layer + @properties: property tracking for this plane */ struct drm_plane { struct drm_device *dev; struct list_head head; struct drm_mode_object base; uint32_t possible_crtcs; uint32_t *format_types; uint32_t format_count; struct drm_crtc *crtc; struct drm_framebuffer *fb; /* CRTC gamma size for reporting to userspace */ uint32_t gamma_size; uint16_t *gamma_store; bool enabled; const struct drm_plane_funcs *funcs; void *helper_private; + + struct drm_object_properties properties; }; /** * struct drm_mode_set * * Represents a single crtc the connectors that it drives with what mode * and from which framebuffer it scans out from. * * This is used to set modes. 
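 *
 * A minimal, hypothetical fill-in before handing it to a driver's
 * set_config hook (or drm_crtc_helper_set_config()):
 *
 *	struct drm_mode_set set = {
 *		.crtc = crtc,
 *		.fb = fb,
 *		.mode = mode,
 *		.x = 0,
 *		.y = 0,
 *		.connectors = &connector,
 *		.num_connectors = 1,
 *	};
 *
 *	ret = crtc->funcs->set_config(&set);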
*/ struct drm_mode_set { struct list_head head; struct drm_framebuffer *fb; struct drm_crtc *crtc; struct drm_display_mode *mode; uint32_t x; uint32_t y; struct drm_connector **connectors; size_t num_connectors; }; /** * struct drm_mode_config_funcs - configure CRTCs for a given screen layout */ struct drm_mode_config_funcs { int (*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_framebuffer **res); void (*output_poll_changed)(struct drm_device *dev); }; struct drm_mode_group { uint32_t num_crtcs; uint32_t num_encoders; uint32_t num_connectors; /* list of object IDs for this group */ uint32_t *id_list; }; /** * drm_mode_config - Mode configuration control structure * */ struct drm_mode_config { struct sx mutex; /* protects configuration (mode lists etc.) */ struct drm_gem_names crtc_names; /* use this idr for all IDs, fb, crtc, connector, modes */ /* this is limited to one for now */ int num_fb; struct list_head fb_list; int num_connector; struct list_head connector_list; int num_encoder; struct list_head encoder_list; int num_plane; struct list_head plane_list; int num_crtc; struct list_head crtc_list; struct list_head property_list; int min_width, min_height; int max_width, max_height; const struct drm_mode_config_funcs *funcs; resource_size_t fb_base; /* output poll support */ bool poll_enabled; struct timeout_task output_poll_task; /* pointers to standard properties */ struct list_head property_blob_list; struct drm_property *edid_property; struct drm_property *dpms_property; /* DVI-I properties */ struct drm_property *dvi_i_subconnector_property; struct drm_property *dvi_i_select_subconnector_property; /* TV properties */ struct drm_property *tv_subconnector_property; struct drm_property *tv_select_subconnector_property; struct drm_property *tv_mode_property; struct drm_property *tv_left_margin_property; struct drm_property *tv_right_margin_property; struct drm_property *tv_top_margin_property; struct drm_property *tv_bottom_margin_property; struct drm_property *tv_brightness_property; struct drm_property *tv_contrast_property; struct drm_property *tv_flicker_reduction_property; struct drm_property *tv_overscan_property; struct drm_property *tv_saturation_property; struct drm_property *tv_hue_property; /* Optional properties */ struct drm_property *scaling_mode_property; struct drm_property *dithering_mode_property; struct drm_property *dirty_info_property; /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; }; #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) #define obj_to_connector(x) container_of(x, struct drm_connector, base) #define obj_to_encoder(x) container_of(x, struct drm_encoder, base) #define obj_to_mode(x) container_of(x, struct drm_display_mode, base) #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) #define obj_to_property(x) container_of(x, struct drm_property, base) #define obj_to_blob(x) container_of(x, struct drm_property_blob, base) #define obj_to_plane(x) container_of(x, struct drm_plane, base) struct drm_prop_enum_list { int type; char *name; }; #if defined(MODE_SETTING_LOCKING_IS_NOT_BROKEN) #define DRM_MODE_CONFIG_ASSERT_LOCKED(dev) \ sx_assert(&dev->mode_config.mutex, SA_XLOCKED) #else #define DRM_MODE_CONFIG_ASSERT_LOCKED(dev) #endif extern char *drm_get_dirty_info_name(int val); extern char *drm_get_connector_status_name(enum drm_connector_status status); extern int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs 
*funcs); extern void drm_crtc_cleanup(struct drm_crtc *crtc); extern int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type); extern void drm_connector_cleanup(struct drm_connector *connector); extern int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type); extern int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, uint32_t format_count, bool priv); extern void drm_plane_cleanup(struct drm_plane *plane); extern void drm_encoder_cleanup(struct drm_encoder *encoder); extern char *drm_get_connector_name(struct drm_connector *connector); extern char *drm_get_dpms_name(int val); extern char *drm_get_dvi_i_subconnector_name(int val); extern char *drm_get_dvi_i_select_name(int val); extern char *drm_get_tv_subconnector_name(int val); extern char *drm_get_tv_select_name(int val); extern void drm_fb_release(struct drm_file *file_priv); extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); extern struct edid *drm_get_edid(struct drm_connector *connector, device_t adapter); extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode); extern void drm_mode_config_init(struct drm_device *dev); extern void drm_mode_config_reset(struct drm_device *dev); extern void drm_mode_config_cleanup(struct drm_device *dev); extern void drm_mode_set_name(struct drm_display_mode *mode); extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2); extern int drm_mode_width(struct drm_display_mode *mode); extern int drm_mode_height(struct drm_display_mode *mode); /* for us by fb module */ extern int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_display_mode *mode); extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode); extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); extern void drm_mode_list_concat(struct list_head *head, struct list_head *new); extern void drm_mode_validate_size(struct drm_device *dev, struct list_head *mode_list, int maxX, int maxY, int maxPitch); extern void drm_mode_validate_clocks(struct drm_device *dev, struct list_head *mode_list, int *min, int *max, int n_ranges); extern void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); extern void drm_mode_sort(struct list_head *mode_list); extern int drm_mode_hsync(const struct drm_display_mode *mode); extern int drm_mode_vrefresh(const struct drm_display_mode *mode); extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags); extern void drm_mode_connector_list_update(struct drm_connector *connector); extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, struct edid *edid); extern int drm_connector_property_set_value(struct drm_connector *connector, 
struct drm_property *property, uint64_t value); extern int drm_connector_property_get_value(struct drm_connector *connector, struct drm_property *property, uint64_t *value); +void drm_object_attach_property(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t init_val); +extern int drm_object_property_set_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t val); +extern int drm_object_property_get_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t *value); extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev); extern void drm_framebuffer_set_object(struct drm_device *dev, unsigned long handle); extern int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs); extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc); extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY); extern bool drm_crtc_in_use(struct drm_crtc *crtc); -extern int drm_connector_attach_property(struct drm_connector *connector, +extern void drm_connector_attach_property(struct drm_connector *connector, struct drm_property *property, uint64_t init_val); extern struct drm_property *drm_property_create(struct drm_device *dev, int flags, const char *name, int num_values); extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, const char *name, const struct drm_prop_enum_list *props, int num_values); +struct drm_property *drm_property_create_bitmask(struct drm_device *dev, + int flags, const char *name, + const struct drm_prop_enum_list *props, + int num_values); struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, const char *name, uint64_t min, uint64_t max); extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); extern int drm_property_add_enum(struct drm_property *property, int index, uint64_t value, const char *name); extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, char *formats[]); extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); extern int drm_mode_create_dithering_property(struct drm_device *dev); extern int drm_mode_create_dirty_info_property(struct drm_device *dev); extern char *drm_get_encoder_name(struct drm_encoder *encoder); extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size); extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type); /* IOCTLs */ extern int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getplane_res(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_setcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getplane(struct drm_device 
*dev, void *data, struct drm_file *file_priv); extern int drm_mode_setplane(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_cursor_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_addfb(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv); extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); extern int drm_mode_rmfb(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_addmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_rmmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_attachmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_detachmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getproperty_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_hotplug_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_replacefb(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern bool drm_detect_hdmi_monitor(struct edid *edid); extern bool drm_detect_monitor_audio(struct edid *edid); extern int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool reduced, bool interlaced, bool margins); extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool interlaced, int margins); extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool interlaced, int margins, int GTF_M, int GTF_2C, int GTF_K, int GTF_2J); extern int drm_add_modes_noedid(struct drm_connector *connector, int hdisplay, int vdisplay); extern int drm_edid_header_is_valid(const u8 *raw_edid); extern bool drm_edid_is_valid(struct edid *edid); struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, - int hsize, int vsize, int fresh); + int hsize, int vsize, int fresh, + bool rb); extern int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, + struct 
drm_file *file_priv); extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp); +extern int drm_format_num_planes(uint32_t format); +extern int drm_format_plane_cpp(uint32_t format, int plane); +extern int drm_format_horz_chroma_subsampling(uint32_t format); +extern int drm_format_vert_chroma_subsampling(uint32_t format); + #endif /* __DRM_CRTC_H__ */ Index: head/sys/dev/drm2/drm_crtc_helper.c =================================================================== --- head/sys/dev/drm2/drm_crtc_helper.c (revision 277486) +++ head/sys/dev/drm2/drm_crtc_helper.c (revision 277487) @@ -1,1043 +1,1043 @@ /* * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie * * DRM core CRTC related functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. * * Authors: * Keith Packard * Eric Anholt * Dave Airlie * Jesse Barnes */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include bool drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector, struct drm_cmdline_mode *cmdline_mode) { char *tun_var_name, *tun_mode; static const char tun_prefix[] = "drm_mode."; bool res; res = false; tun_var_name = malloc(sizeof(tun_prefix) + strlen(drm_get_connector_name(connector)), M_TEMP, M_WAITOK); strcpy(tun_var_name, tun_prefix); strcat(tun_var_name, drm_get_connector_name(connector)); tun_mode = kern_getenv(tun_var_name); if (tun_mode != NULL) { res = drm_mode_parse_command_line_for_connector(tun_mode, connector, cmdline_mode); freeenv(tun_mode); } free(tun_var_name, M_TEMP); return (res); } static bool drm_kms_helper_poll = true; static void drm_mode_validate_flag(struct drm_connector *connector, int flags) { struct drm_display_mode *mode, *t; if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE)) return; list_for_each_entry_safe(mode, t, &connector->modes, head) { if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && !(flags & DRM_MODE_FLAG_INTERLACE)) mode->status = MODE_NO_INTERLACE; if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) && !(flags & DRM_MODE_FLAG_DBLSCAN)) mode->status = MODE_NO_DBLESCAN; } return; } /** * drm_helper_probe_single_connector_modes - get complete set of display modes * @dev: DRM device * @maxX: max width for modes * @maxY: max height for modes * * LOCKING: * Caller must hold mode config lock. * * Based on @dev's mode_config layout, scan all the connectors and try to detect * modes on them. 
Modes will first be added to the connector's probed_modes * list, then culled (based on validity and the @maxX, @maxY parameters) and * put into the normal modes list. * * Intended to be used either at bootup time or when major configuration * changes have occurred. * * FIXME: take into account monitor limits * * RETURNS: * Number of modes found on @connector. */ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY) { struct drm_device *dev = connector->dev; struct drm_display_mode *mode, *t; struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; struct drm_cmdline_mode cmdline_mode; int count = 0; int mode_flags = 0; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); /* set all modes to the unverified state */ list_for_each_entry_safe(mode, t, &connector->modes, head) mode->status = MODE_UNVERIFIED; if (connector->force) { if (connector->force == DRM_FORCE_ON) connector->status = connector_status_connected; else connector->status = connector_status_disconnected; if (connector->funcs->force) connector->funcs->force(connector); } else { connector->status = connector->funcs->detect(connector, true); drm_kms_helper_poll_enable(dev); } if (connector->status == connector_status_disconnected) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", connector->base.id, drm_get_connector_name(connector)); drm_mode_connector_update_edid_property(connector, NULL); goto prune; } count = (*connector_funcs->get_modes)(connector); if (count == 0 && drm_fetch_cmdline_mode_from_kenv(connector, &cmdline_mode)) { mode = drm_mode_create_from_cmdline_mode(dev, &cmdline_mode); if (mode != NULL) { DRM_DEBUG_KMS( "[CONNECTOR:%d:%s] found manual override ", connector->base.id, drm_get_connector_name(connector)); drm_mode_debug_printmodeline(mode); drm_mode_probed_add(connector, mode); count++; } else { DRM_ERROR( "[CONNECTOR:%d:%s] manual override mode: parse error\n", connector->base.id, drm_get_connector_name(connector)); } } if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); if (count == 0) goto prune; drm_mode_connector_list_update(connector); if (maxX && maxY) drm_mode_validate_size(dev, &connector->modes, maxX, maxY, 0); if (connector->interlace_allowed) mode_flags |= DRM_MODE_FLAG_INTERLACE; if (connector->doublescan_allowed) mode_flags |= DRM_MODE_FLAG_DBLSCAN; drm_mode_validate_flag(connector, mode_flags); list_for_each_entry_safe(mode, t, &connector->modes, head) { if (mode->status == MODE_OK) mode->status = connector_funcs->mode_valid(connector, mode); } prune: drm_mode_prune_invalid(dev, &connector->modes, true); if (list_empty(&connector->modes)) return 0; drm_mode_sort(&connector->modes); DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id, drm_get_connector_name(connector)); list_for_each_entry_safe(mode, t, &connector->modes, head) { mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); drm_mode_debug_printmodeline(mode); } return count; } /** * drm_helper_encoder_in_use - check if a given encoder is in use * @encoder: encoder to check * * LOCKING: * Caller must hold mode config lock. * * Walk @encoders's DRM device's mode_config and see if it's in use. * * RETURNS: * True if @encoder is part of the mode_config, false otherwise. 
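 *
 * Minimal usage sketch, mirroring what drm_helper_disable_unused_functions()
 * below does:
 *
 *	if (!drm_helper_encoder_in_use(encoder))
 *		drm_encoder_disable(encoder);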
*/ bool drm_helper_encoder_in_use(struct drm_encoder *encoder) { struct drm_connector *connector; struct drm_device *dev = encoder->dev; list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder) return true; return false; } /** * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config * @crtc: CRTC to check * * LOCKING: * Caller must hold mode config lock. * * Walk @crtc's DRM device's mode_config and see if it's in use. * * RETURNS: * True if @crtc is part of the mode_config, false otherwise. */ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) { struct drm_encoder *encoder; struct drm_device *dev = crtc->dev; /* FIXME: Locking around list access? */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) return true; return false; } static void drm_encoder_disable(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; if (encoder_funcs->disable) (*encoder_funcs->disable)(encoder); else (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); } /** * drm_helper_disable_unused_functions - disable unused objects * @dev: DRM device * * LOCKING: * Caller must hold mode config lock. * * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled * by calling its dpms function, which should power it off. */ void drm_helper_disable_unused_functions(struct drm_device *dev) { struct drm_encoder *encoder; struct drm_connector *connector; struct drm_crtc *crtc; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (!connector->encoder) continue; if (connector->status == connector_status_disconnected) connector->encoder = NULL; } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (!drm_helper_encoder_in_use(encoder)) { drm_encoder_disable(encoder); /* disconnector encoder from any connector */ encoder->crtc = NULL; } } list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) { if (crtc_funcs->disable) (*crtc_funcs->disable)(crtc); else (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); crtc->fb = NULL; } } } /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? * @encoder: encoder to test * @crtc: crtc to test * * Return false if @encoder can't be driven by @crtc, true otherwise. */ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, struct drm_crtc *crtc) { struct drm_device *dev; struct drm_crtc *tmp; int crtc_mask = 1; if (crtc == NULL) printf("checking null crtc?\n"); dev = crtc->dev; list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { if (tmp == crtc) break; crtc_mask <<= 1; } if (encoder->possible_crtcs & crtc_mask) return true; return false; } /* * Check the CRTC we're going to map each output to vs. its current * CRTC. If they don't match, we have to disable the output and the CRTC * since the driver will have to re-route things. 
*/ static void drm_crtc_prepare_encoders(struct drm_device *dev) { struct drm_encoder_helper_funcs *encoder_funcs; struct drm_encoder *encoder; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { encoder_funcs = encoder->helper_private; /* Disable unused encoders */ if (encoder->crtc == NULL) drm_encoder_disable(encoder); /* Disable encoders whose CRTC is about to change */ if (encoder_funcs->get_crtc && encoder->crtc != (*encoder_funcs->get_crtc)(encoder)) drm_encoder_disable(encoder); } } /** * drm_crtc_set_mode - set a mode * @crtc: CRTC to program * @mode: mode to use * @x: width of mode * @y: height of mode * * LOCKING: * Caller must hold mode config lock. * * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance * to fixup or reject the mode prior to trying to set it. * * RETURNS: * True if the mode was set successfully, or false otherwise. */ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_encoder_helper_funcs *encoder_funcs; int saved_x, saved_y; struct drm_encoder *encoder; bool ret = true; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) return true; adjusted_mode = drm_mode_duplicate(dev, mode); if (!adjusted_mode) return false; saved_hwmode = crtc->hwmode; saved_mode = crtc->mode; saved_x = crtc->x; saved_y = crtc->y; /* Update crtc values up front so the driver can rely on them for mode * setting. */ crtc->mode = *mode; crtc->x = x; crtc->y = y; /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!(ret = encoder_funcs->mode_fixup(encoder, mode, adjusted_mode))) { goto done; } } if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { goto done; } DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); /* Prepare the encoders and CRTCs before setting the mode. */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; /* Disable the encoders as the first thing we do. */ encoder_funcs->prepare(encoder); } drm_crtc_prepare_encoders(dev); crtc_funcs->prepare(crtc); /* Set up the DPLL and any encoders state that needs to adjust or depend * on the DPLL. */ ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); if (!ret) goto done; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", encoder->base.id, drm_get_encoder_name(encoder), mode->base.id, mode->name); encoder_funcs = encoder->helper_private; encoder_funcs->mode_set(encoder, mode, adjusted_mode); } /* Now enable the clocks, plane, pipe, and connectors that we set up. */ crtc_funcs->commit(crtc); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; encoder_funcs->commit(encoder); } /* Store real post-adjustment hardware mode. 
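 * (crtc->mode keeps the timings that were requested; crtc->hwmode keeps the
 * encoder/crtc-adjusted timings, which drm_calc_timestamping_constants()
 * below derives the vblank/swap timestamping constants from.)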
*/ crtc->hwmode = *adjusted_mode; /* Calculate and store various constants which * are later needed by vblank and swap-completion * timestamping. They are derived from true hwmode. */ drm_calc_timestamping_constants(crtc); /* FIXME: add subpixel order */ done: drm_mode_destroy(dev, adjusted_mode); if (!ret) { crtc->hwmode = saved_hwmode; crtc->mode = saved_mode; crtc->x = saved_x; crtc->y = saved_y; } return ret; } static int drm_crtc_helper_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_connector *connector; struct drm_encoder *encoder; /* Decouple all encoders and their attached connectors from this crtc */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder != encoder) continue; connector->encoder = NULL; } } drm_helper_disable_unused_functions(dev); return 0; } /** * drm_crtc_helper_set_config - set a new config from userspace * @crtc: CRTC to setup * @crtc_info: user provided configuration * @new_mode: new mode to set * @connector_set: set of connectors for the new config * @fb: new framebuffer * * LOCKING: * Caller must hold mode config lock. * * Setup a new configuration, provided by the user in @crtc_info, and enable * it. * * RETURNS: * Zero. (FIXME) */ int drm_crtc_helper_set_config(struct drm_mode_set *set) { struct drm_device *dev; struct drm_crtc *save_crtcs, *new_crtc, *crtc; struct drm_encoder *save_encoders, *new_encoder, *encoder; struct drm_framebuffer *old_fb = NULL; bool mode_changed = false; /* if true do a full mode set */ bool fb_changed = false; /* if true and !mode_changed just do a flip */ struct drm_connector *save_connectors, *connector; int count = 0, ro, fail = 0; struct drm_crtc_helper_funcs *crtc_funcs; struct drm_mode_set save_set; - int ret = 0; + int ret; int i; DRM_DEBUG_KMS("\n"); if (!set) return -EINVAL; if (!set->crtc) return -EINVAL; if (!set->crtc->helper_private) return -EINVAL; crtc_funcs = set->crtc->helper_private; if (!set->mode) set->fb = NULL; if (set->fb) { DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", set->crtc->base.id, set->fb->base.id, (int)set->num_connectors, set->x, set->y); } else { DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); return drm_crtc_helper_disable(set->crtc); } dev = set->crtc->dev; /* Allocate space for the backup of all (non-pointer) crtc, encoder and * connector data. */ save_crtcs = malloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc), DRM_MEM_KMS, M_WAITOK | M_ZERO); save_encoders = malloc(dev->mode_config.num_encoder * sizeof(struct drm_encoder), DRM_MEM_KMS, M_WAITOK | M_ZERO); save_connectors = malloc(dev->mode_config.num_connector * sizeof(struct drm_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); /* Copy data. Note that driver private data is not affected. * Should anything bad happen only the expected state is * restored, not the drivers personal bookkeeping. 
*/ count = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { save_crtcs[count++] = *crtc; } count = 0; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { save_encoders[count++] = *encoder; } count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { save_connectors[count++] = *connector; } save_set.crtc = set->crtc; save_set.mode = &set->crtc->mode; save_set.x = set->crtc->x; save_set.y = set->crtc->y; save_set.fb = set->crtc->fb; /* We should be able to check here if the fb has the same properties * and then just flip_or_move it */ if (set->crtc->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ if (set->crtc->fb == NULL) { DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); mode_changed = true; } else if (set->fb == NULL) { mode_changed = true; } else fb_changed = true; } if (set->x != set->crtc->x || set->y != set->crtc->y) fb_changed = true; if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { DRM_DEBUG_KMS("modes are different, full mode set\n"); drm_mode_debug_printmodeline(&set->crtc->mode); drm_mode_debug_printmodeline(set->mode); mode_changed = true; } /* a) traverse passed in connector list and get encoders for them */ count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; new_encoder = connector->encoder; for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == connector) { new_encoder = connector_funcs->best_encoder(connector); /* if we can't get an encoder for a connector we are setting now - then fail */ if (new_encoder == NULL) /* don't break so fail path works correct */ fail = 1; break; } } if (new_encoder != connector->encoder) { DRM_DEBUG_KMS("encoder changed, full mode switch\n"); mode_changed = true; /* If the encoder is reused for another connector, then * the appropriate crtc will be set later. 
*/ if (connector->encoder) connector->encoder->crtc = NULL; connector->encoder = new_encoder; } } if (fail) { ret = -EINVAL; goto fail; } count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (!connector->encoder) continue; if (connector->encoder->crtc == set->crtc) new_crtc = NULL; else new_crtc = connector->encoder->crtc; for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == connector) new_crtc = set->crtc; } /* Make sure the new CRTC will work with the encoder */ if (new_crtc && !drm_encoder_crtc_ok(connector->encoder, new_crtc)) { ret = -EINVAL; goto fail; } if (new_crtc != connector->encoder->crtc) { DRM_DEBUG_KMS("crtc changed, full mode switch\n"); mode_changed = true; connector->encoder->crtc = new_crtc; } if (new_crtc) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", connector->base.id, drm_get_connector_name(connector), new_crtc->base.id); } else { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", connector->base.id, drm_get_connector_name(connector)); } } /* mode_set_base is not a required function */ if (fb_changed && !crtc_funcs->mode_set_base) mode_changed = true; if (mode_changed) { set->crtc->enabled = drm_helper_crtc_in_use(set->crtc); if (set->crtc->enabled) { DRM_DEBUG_KMS("attempting to set mode from" " userspace\n"); drm_mode_debug_printmodeline(set->mode); old_fb = set->crtc->fb; set->crtc->fb = set->fb; if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, old_fb)) { DRM_ERROR("failed to set mode on [CRTC:%d]\n", set->crtc->base.id); set->crtc->fb = old_fb; ret = -EINVAL; goto fail; } DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); for (i = 0; i < set->num_connectors; i++) { DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, drm_get_connector_name(set->connectors[i])); set->connectors[i]->dpms = DRM_MODE_DPMS_ON; } } drm_helper_disable_unused_functions(dev); } else if (fb_changed) { set->crtc->x = set->x; set->crtc->y = set->y; old_fb = set->crtc->fb; if (set->crtc->fb != set->fb) set->crtc->fb = set->fb; ret = crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb); if (ret != 0) { set->crtc->fb = old_fb; goto fail; } } free(save_connectors, DRM_MEM_KMS); free(save_encoders, DRM_MEM_KMS); free(save_crtcs, DRM_MEM_KMS); return 0; fail: /* Restore all previous data. 
*/ count = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { *crtc = save_crtcs[count++]; } count = 0; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { *encoder = save_encoders[count++]; } count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { *connector = save_connectors[count++]; } /* Try to restore the config */ if (mode_changed && !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, save_set.y, save_set.fb)) DRM_ERROR("failed to restore config after modeset failure\n"); free(save_connectors, DRM_MEM_KMS); free(save_encoders, DRM_MEM_KMS); free(save_crtcs, DRM_MEM_KMS); return ret; } static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) { int dpms = DRM_MODE_DPMS_OFF; struct drm_connector *connector; struct drm_device *dev = encoder->dev; list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder) if (connector->dpms < dpms) dpms = connector->dpms; return dpms; } static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) { int dpms = DRM_MODE_DPMS_OFF; struct drm_connector *connector; struct drm_device *dev = crtc->dev; list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder && connector->encoder->crtc == crtc) if (connector->dpms < dpms) dpms = connector->dpms; return dpms; } /** * drm_helper_connector_dpms * @connector affected connector * @mode DPMS mode * * Calls the low-level connector DPMS function, then * calls appropriate encoder and crtc DPMS functions as well */ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) { struct drm_encoder *encoder = connector->encoder; struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; int old_dpms; if (mode == connector->dpms) return; old_dpms = connector->dpms; connector->dpms = mode; /* from off to on, do crtc then encoder */ if (mode < old_dpms) { if (crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } if (encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; if (encoder_funcs->dpms) (*encoder_funcs->dpms) (encoder, drm_helper_choose_encoder_dpms(encoder)); } } /* from on to off, do encoder then crtc */ if (mode > old_dpms) { if (encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; if (encoder_funcs->dpms) (*encoder_funcs->dpms) (encoder, drm_helper_choose_encoder_dpms(encoder)); } if (crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } } return; } int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, struct drm_mode_fb_cmd2 *mode_cmd) { int i; fb->width = mode_cmd->width; fb->height = mode_cmd->height; for (i = 0; i < 4; i++) { fb->pitches[i] = mode_cmd->pitches[i]; fb->offsets[i] = mode_cmd->offsets[i]; } drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, &fb->bits_per_pixel); fb->pixel_format = mode_cmd->pixel_format; return 0; } int drm_helper_resume_force_mode(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; struct drm_crtc_helper_funcs *crtc_funcs; int ret; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (!crtc->enabled) continue; ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); if (!ret) DRM_ERROR("failed to set mode on 
crtc %p\n", crtc); /* Turn off outputs that were already powered off */ if (drm_helper_choose_crtc_dpms(crtc)) { list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if(encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (encoder_funcs->dpms) (*encoder_funcs->dpms) (encoder, drm_helper_choose_encoder_dpms(encoder)); } crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } } /* disable the unused connectors while restoring the modesetting */ drm_helper_disable_unused_functions(dev); return 0; } #define DRM_OUTPUT_POLL_PERIOD (10 * hz) static void output_poll_execute(void *ctx, int pending) { struct drm_device *dev; struct drm_connector *connector; enum drm_connector_status old_status; bool repoll = false, changed = false; if (!drm_kms_helper_poll) return; dev = ctx; sx_xlock(&dev->mode_config.mutex); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { /* if this is HPD or polled don't check it - TV out for instance */ if (!connector->polled) continue; else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) repoll = true; old_status = connector->status; /* if we are connected and don't want to poll for disconnect skip it */ if (old_status == connector_status_connected && !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && !(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; connector->status = connector->funcs->detect(connector, false); DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", connector->base.id, drm_get_connector_name(connector), old_status, connector->status); if (old_status != connector->status) changed = true; } sx_xunlock(&dev->mode_config.mutex); if (changed) { #if 0 /* send a uevent + call fbdev */ drm_sysfs_hotplug_event(dev); #endif if (dev->mode_config.funcs->output_poll_changed) dev->mode_config.funcs->output_poll_changed(dev); } if (repoll) { taskqueue_enqueue_timeout(taskqueue_thread, &dev->mode_config.output_poll_task, DRM_OUTPUT_POLL_PERIOD); } } void drm_kms_helper_poll_disable(struct drm_device *dev) { if (!dev->mode_config.poll_enabled) return; taskqueue_cancel_timeout(taskqueue_thread, &dev->mode_config.output_poll_task, NULL); } void drm_kms_helper_poll_enable(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) return; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->polled) poll = true; } if (poll) { taskqueue_enqueue_timeout(taskqueue_thread, &dev->mode_config.output_poll_task, DRM_OUTPUT_POLL_PERIOD); } } void drm_kms_helper_poll_init(struct drm_device *dev) { TIMEOUT_TASK_INIT(taskqueue_thread, &dev->mode_config.output_poll_task, 0, output_poll_execute, dev); dev->mode_config.poll_enabled = true; drm_kms_helper_poll_enable(dev); } void drm_kms_helper_poll_fini(struct drm_device *dev) { drm_kms_helper_poll_disable(dev); } void drm_helper_hpd_irq_event(struct drm_device *dev) { if (!dev->mode_config.poll_enabled) return; /* kill timer and schedule immediate execution, this doesn't block */ taskqueue_cancel_timeout(taskqueue_thread, &dev->mode_config.output_poll_task, NULL); if (drm_kms_helper_poll) taskqueue_enqueue_timeout(taskqueue_thread, &dev->mode_config.output_poll_task, 0); } Index: head/sys/dev/drm2/drm_crtc_helper.h =================================================================== --- head/sys/dev/drm2/drm_crtc_helper.h (revision 277486) +++ 
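/*
 * [Editor's sketch] output_poll_execute() above re-arms itself through
 * taskqueue(9) for as long as some connector still asks to be polled.
 * Below is a minimal self-re-arming timeout-task skeleton built from the
 * same calls used above (TIMEOUT_TASK_INIT, taskqueue_enqueue_timeout).
 * It assumes the usual kernel headers (sys/param.h, sys/kernel.h,
 * sys/taskqueue.h); the "example_" names are invented for illustration.
 */
struct example_poller {
	struct timeout_task	task;
	int			remaining;	/* how many more periods to run */
};

static void
example_poll_fn(void *ctx, int pending __unused)
{
	struct example_poller *p = ctx;

	/* ...periodic work, e.g. re-detect connector status... */
	if (--p->remaining > 0)
		taskqueue_enqueue_timeout(taskqueue_thread, &p->task,
		    10 * hz);	/* same 10 second period as DRM_OUTPUT_POLL_PERIOD */
}

static void
example_poll_start(struct example_poller *p)
{
	p->remaining = 5;
	TIMEOUT_TASK_INIT(taskqueue_thread, &p->task, 0, example_poll_fn, p);
	taskqueue_enqueue_timeout(taskqueue_thread, &p->task, 10 * hz);
}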
head/sys/dev/drm2/drm_crtc_helper.h (revision 277487) @@ -1,146 +1,146 @@ /* * Copyright © 2006 Keith Packard * Copyright © 2007-2008 Dave Airlie * Copyright © 2007-2008 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * $FreeBSD$ */ /* * The DRM mode setting helper functions are common code for drivers to use if * they wish. Drivers are not forced to use this code in their * implementations but it would be useful if the code they do use at least * provides a consistent interface and operation to userspace */ #ifndef __DRM_CRTC_HELPER_H__ #define __DRM_CRTC_HELPER_H__ enum mode_set_atomic { LEAVE_ATOMIC_MODE_SET, ENTER_ATOMIC_MODE_SET, }; struct drm_crtc_helper_funcs { /* * Control power levels on the CRTC. If the mode passed in is * unsupported, the provider must use the next lowest power level.
*/ void (*dpms)(struct drm_crtc *crtc, int mode); void (*prepare)(struct drm_crtc *crtc); void (*commit)(struct drm_crtc *crtc); /* Provider can fixup or change mode timings before modeset occurs */ bool (*mode_fixup)(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); /* Actually set the mode */ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb); /* Move the crtc on the current fb to the given position *optional* */ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); int (*mode_set_base_atomic)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic); /* reload the current crtc LUT */ void (*load_lut)(struct drm_crtc *crtc); /* disable crtc when not in use - more explicit than dpms off */ void (*disable)(struct drm_crtc *crtc); }; struct drm_encoder_helper_funcs { void (*dpms)(struct drm_encoder *encoder, int mode); void (*save)(struct drm_encoder *encoder); void (*restore)(struct drm_encoder *encoder); bool (*mode_fixup)(struct drm_encoder *encoder, - const struct drm_display_mode *mode, + struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); void (*prepare)(struct drm_encoder *encoder); void (*commit)(struct drm_encoder *encoder); void (*mode_set)(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); /* detect for DAC style encoders */ enum drm_connector_status (*detect)(struct drm_encoder *encoder, struct drm_connector *connector); /* disable encoder when not in use - more explicit than dpms off */ void (*disable)(struct drm_encoder *encoder); }; struct drm_connector_helper_funcs { int (*get_modes)(struct drm_connector *connector); int (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); struct drm_encoder *(*best_encoder)(struct drm_connector *connector); }; extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); extern void drm_helper_disable_unused_functions(struct drm_device *dev); extern int drm_crtc_helper_set_config(struct drm_mode_set *set); extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb); extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, struct drm_mode_fb_cmd2 *mode_cmd); static inline void drm_crtc_helper_add(struct drm_crtc *crtc, const struct drm_crtc_helper_funcs *funcs) { crtc->helper_private = __DECONST(void *, funcs); } static inline void drm_encoder_helper_add(struct drm_encoder *encoder, const struct drm_encoder_helper_funcs *funcs) { encoder->helper_private = __DECONST(void *, funcs); } static inline void drm_connector_helper_add(struct drm_connector *connector, const struct drm_connector_helper_funcs *funcs) { connector->helper_private = __DECONST(void *, funcs); } extern int drm_helper_resume_force_mode(struct drm_device *dev); extern void drm_kms_helper_poll_init(struct drm_device *dev); extern void drm_kms_helper_poll_fini(struct drm_device *dev); extern void drm_helper_hpd_irq_event(struct drm_device *dev); extern void 
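/*
 * [Editor's sketch] How a driver typically consumes the vtables declared
 * above: it fills a static const drm_crtc_helper_funcs with its own hooks
 * and attaches it with drm_crtc_helper_add().  The "mydrv_" names are
 * invented for illustration; only members declared in the structure above
 * are used, and the stub bodies stand in for real hardware programming.
 */
static void
mydrv_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	/* program the hardware power state for this crtc */
}

static void
mydrv_crtc_prepare(struct drm_crtc *crtc)
{
	/* blank/disable the crtc before a mode switch */
}

static void
mydrv_crtc_commit(struct drm_crtc *crtc)
{
	/* re-enable the crtc once the new mode is programmed */
}

static bool
mydrv_crtc_mode_fixup(struct drm_crtc *crtc,
    const struct drm_display_mode *mode,
    struct drm_display_mode *adjusted_mode)
{
	return (true);		/* accept the requested timings unchanged */
}

static int
mydrv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
    struct drm_display_mode *adjusted_mode, int x, int y,
    struct drm_framebuffer *old_fb)
{
	return (0);		/* write the new timings to the hardware here */
}

static const struct drm_crtc_helper_funcs mydrv_crtc_helper_funcs = {
	.dpms		= mydrv_crtc_dpms,
	.prepare	= mydrv_crtc_prepare,
	.commit		= mydrv_crtc_commit,
	.mode_fixup	= mydrv_crtc_mode_fixup,
	.mode_set	= mydrv_crtc_mode_set,
};

/* At CRTC creation time:  drm_crtc_helper_add(&mycrtc->base, &mydrv_crtc_helper_funcs); */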
drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); extern bool drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector, struct drm_cmdline_mode *cmdline_mode); #endif Index: head/sys/dev/drm2/drm_drv.c =================================================================== --- head/sys/dev/drm2/drm_drv.c (revision 277486) +++ head/sys/dev/drm2/drm_drv.c (revision 277487) @@ -1,1084 +1,1140 @@ /*- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); /** @file drm_drv.c * The catch-all file for DRM device support, including module setup/teardown, * open/close, and ioctl dispatch. */ #include #include #include #include #include #include #include #include #ifdef DRM_DEBUG_DEFAULT_ON int drm_debug_flag = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | DRM_DEBUGBITS_FAILED_IOCTL); #else int drm_debug_flag = 0; #endif int drm_notyet_flag = 0; unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ /* * Default to use monotonic timestamps for wait-for-vblank and page-flip * complete events. 
*/ unsigned int drm_timestamp_monotonic = 1; static int drm_load(struct drm_device *dev); static void drm_unload(struct drm_device *dev); static drm_pci_id_list_t *drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist); static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot); static int drm_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag); TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag); break; } return (0); } static moduledata_t drm_mod = { "drmn", drm_modevent, 0 }; DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); MODULE_VERSION(drmn, 1); MODULE_DEPEND(drmn, agp, 1, 1, 1); MODULE_DEPEND(drmn, pci, 1, 1, 1); MODULE_DEPEND(drmn, mem, 1, 1, 1); MODULE_DEPEND(drmn, iicbus, 1, 1, 1); static drm_ioctl_desc_t drm_ioctls[256] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; static struct cdevsw drm_cdevsw = { .d_version = D_VERSION, .d_open = drm_open, .d_read = drm_read, .d_ioctl = drm_ioctl, .d_poll = drm_poll, .d_mmap = drm_mmap, .d_mmap_single = drm_mmap_single, .d_name = "drm", .d_flags = D_TRACKCLOSE }; static int drm_msi = 1; /* Enable by default. */ SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device"); SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1, "Enable MSI interrupts for drm devices"); static struct drm_msi_blacklist_entry drm_msi_blacklist[] = { {0x8086, 0x2772}, /* Intel i945G */ \ {0x8086, 0x27A2}, /* Intel i945GM */ \ {0x8086, 0x27AE}, /* Intel i945GME */ \ {0, 0} }; static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags) { int i = 0; if (dev->driver->use_msi != NULL) { int use_msi; use_msi = dev->driver->use_msi(dev, flags); return (!use_msi); } /* TODO: Maybe move this to a callback in i915? */ for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) { if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) && (drm_msi_blacklist[i].device == dev->pci_device)) { return 1; } } return 0; } int drm_probe(device_t kdev, drm_pci_id_list_t *idlist) { drm_pci_id_list_t *id_entry; int vendor, device; vendor = pci_get_vendor(kdev); device = pci_get_device(kdev); if (pci_get_class(kdev) != PCIC_DISPLAY - || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA) + || (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA && + pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER)) return ENXIO; id_entry = drm_find_description(vendor, device, idlist); if (id_entry != NULL) { if (!device_get_desc(kdev)) { DRM_DEBUG("desc : %s\n", device_get_desc(kdev)); device_set_desc(kdev, id_entry->name); } return 0; } return ENXIO; } int drm_attach(device_t kdev, drm_pci_id_list_t *idlist) { struct drm_device *dev; drm_pci_id_list_t *id_entry; int error, msicount; dev = device_get_softc(kdev); dev->device = kdev; dev->pci_domain = pci_get_domain(dev->device); dev->pci_bus = pci_get_bus(dev->device); dev->pci_slot = pci_get_slot(dev->device); dev->pci_func = pci_get_function(dev->device); dev->pci_vendor = pci_get_vendor(dev->device); dev->pci_device = pci_get_device(dev->device); dev->pci_subvendor = pci_get_subvendor(dev->device); dev->pci_subdevice = pci_get_subdevice(dev->device); id_entry = drm_find_description(dev->pci_vendor, dev->pci_device, idlist); dev->id_entry = id_entry; if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) { if (drm_msi && !drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) { msicount = pci_msi_count(dev->device); DRM_DEBUG("MSI count = %d\n", msicount); if (msicount > 1) msicount = 1; if (pci_alloc_msi(dev->device, &msicount) == 0) { DRM_INFO("MSI enabled %d message(s)\n", msicount); dev->msi_enabled = 1; dev->irqrid = 1; } } dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ, &dev->irqrid, RF_SHAREABLE); if (!dev->irqr) { 
return (ENOENT); } dev->irq = (int) rman_get_start(dev->irqr); } mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF); mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF); mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF); mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF); mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF); sx_init(&dev->dev_struct_lock, "drmslk"); error = drm_load(dev); if (error) goto error; error = drm_create_cdevs(kdev); if (error) goto error; return (error); error: if (dev->irqr) { bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid, dev->irqr); } if (dev->msi_enabled) { pci_release_msi(dev->device); } return (error); } int drm_create_cdevs(device_t kdev) { struct drm_device *dev; int error, unit; unit = device_get_unit(kdev); dev = device_get_softc(kdev); error = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &dev->devnode, &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID, DRM_DEV_MODE, "dri/card%d", unit); if (error == 0) dev->devnode->si_drv1 = dev; return (error); } int drm_detach(device_t kdev) { struct drm_device *dev; dev = device_get_softc(kdev); drm_unload(dev); if (dev->irqr) { bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid, dev->irqr); if (dev->msi_enabled) { pci_release_msi(dev->device); DRM_INFO("MSI released\n"); } } return (0); } #ifndef DRM_DEV_NAME #define DRM_DEV_NAME "drm" #endif devclass_t drm_devclass; drm_pci_id_list_t *drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist) { int i = 0; for (i = 0; idlist[i].vendor != 0; i++) { if ((idlist[i].vendor == vendor) && ((idlist[i].device == device) || (idlist[i].device == 0))) { return &idlist[i]; } } return NULL; } static int drm_firstopen(struct drm_device *dev) { drm_local_map_t *map; int i; DRM_LOCK_ASSERT(dev); /* prebuild the SAREA */ i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); if (i != 0) return i; if (dev->driver->firstopen) dev->driver->firstopen(dev); dev->buf_use = 0; if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { i = drm_dma_setup(dev); if (i != 0) return i; } for (i = 0; i < DRM_HASH_SIZE; i++) { dev->magiclist[i].head = NULL; dev->magiclist[i].tail = NULL; } dev->lock.lock_queue = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) dev->irq_enabled = 0; dev->context_flag = 0; dev->last_context = 0; dev->if_version = 0; dev->buf_sigio = NULL; DRM_DEBUG("\n"); return 0; } static int drm_lastclose(struct drm_device *dev) { drm_magic_entry_t *pt, *next; drm_local_map_t *map, *mapsave; int i; DRM_LOCK_ASSERT(dev); DRM_DEBUG("\n"); if (dev->driver->lastclose != NULL) dev->driver->lastclose(dev); if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled) drm_irq_uninstall(dev); if (dev->unique) { free(dev->unique, DRM_MEM_DRIVER); dev->unique = NULL; dev->unique_len = 0; } /* Clear pid list */ for (i = 0; i < DRM_HASH_SIZE; i++) { for (pt = dev->magiclist[i].head; pt; pt = next) { next = pt->next; free(pt, DRM_MEM_MAGIC); } dev->magiclist[i].head = dev->magiclist[i].tail = NULL; } DRM_UNLOCK(dev); drm_drawable_free_all(dev); DRM_LOCK(dev); /* Clear AGP information */ if (dev->agp) { drm_agp_mem_t *entry; drm_agp_mem_t *nexte; /* Remove AGP resources, but leave dev->agp intact until * drm_unload is called. 
*/ for (entry = dev->agp->memory; entry; entry = nexte) { nexte = entry->next; if (entry->bound) drm_agp_unbind_memory(entry->handle); drm_agp_free_memory(entry->handle); free(entry, DRM_MEM_AGPLISTS); } dev->agp->memory = NULL; if (dev->agp->acquired) drm_agp_release(dev); dev->agp->acquired = 0; dev->agp->enabled = 0; } if (dev->sg != NULL) { drm_sg_cleanup(dev->sg); dev->sg = NULL; } TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) { if (!(map->flags & _DRM_DRIVER)) drm_rmmap(dev, map); } drm_dma_takedown(dev); if (dev->lock.hw_lock) { dev->lock.hw_lock = NULL; /* SHM removed */ dev->lock.file_priv = NULL; DRM_WAKEUP_INT((void *)&dev->lock.lock_queue); } return 0; } static int drm_load(struct drm_device *dev) { int i, retcode; DRM_DEBUG("\n"); TAILQ_INIT(&dev->maplist); dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL); if (dev->map_unrhdr == NULL) { DRM_ERROR("Couldn't allocate map number allocator\n"); return EINVAL; } drm_mem_init(); drm_sysctl_init(dev); TAILQ_INIT(&dev->files); dev->counters = 6; dev->types[0] = _DRM_STAT_LOCK; dev->types[1] = _DRM_STAT_OPENS; dev->types[2] = _DRM_STAT_CLOSES; dev->types[3] = _DRM_STAT_IOCTLS; dev->types[4] = _DRM_STAT_LOCKS; dev->types[5] = _DRM_STAT_UNLOCKS; for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++) atomic_set(&dev->counts[i], 0); INIT_LIST_HEAD(&dev->vblank_event_list); if (drm_core_has_AGP(dev)) { if (drm_device_is_agp(dev)) dev->agp = drm_agp_init(); if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && dev->agp == NULL) { DRM_ERROR("Card isn't AGP, or couldn't initialize " "AGP.\n"); retcode = ENOMEM; goto error; } if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) { if (drm_mtrr_add(dev->agp->info.ai_aperture_base, dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0) dev->agp->mtrr = 1; } } retcode = drm_ctxbitmap_init(dev); if (retcode != 0) { DRM_ERROR("Cannot allocate memory for context bitmap.\n"); goto error; } dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL); if (dev->drw_unrhdr == NULL) { DRM_ERROR("Couldn't allocate drawable number allocator\n"); retcode = ENOMEM; goto error; } if (dev->driver->driver_features & DRIVER_GEM) { retcode = drm_gem_init(dev); if (retcode != 0) { DRM_ERROR("Cannot initialize graphics execution " "manager (GEM)\n"); goto error1; } } if (dev->driver->load != NULL) { DRM_LOCK(dev); /* Shared code returns -errno. 
*/ retcode = -dev->driver->load(dev, dev->id_entry->driver_private); if (pci_enable_busmaster(dev->device)) DRM_ERROR("Request to enable bus-master failed.\n"); DRM_UNLOCK(dev); if (retcode != 0) goto error1; } DRM_INFO("Initialized %s %d.%d.%d %s\n", dev->driver->name, dev->driver->major, dev->driver->minor, dev->driver->patchlevel, dev->driver->date); return 0; error1: delete_unrhdr(dev->drw_unrhdr); drm_gem_destroy(dev); error: drm_ctxbitmap_cleanup(dev); drm_sysctl_cleanup(dev); DRM_LOCK(dev); drm_lastclose(dev); DRM_UNLOCK(dev); if (dev->devnode != NULL) destroy_dev(dev->devnode); mtx_destroy(&dev->drw_lock); mtx_destroy(&dev->vbl_lock); mtx_destroy(&dev->irq_lock); mtx_destroy(&dev->dev_lock); mtx_destroy(&dev->event_lock); sx_destroy(&dev->dev_struct_lock); return retcode; } static void drm_unload(struct drm_device *dev) { int i; DRM_DEBUG("\n"); drm_sysctl_cleanup(dev); if (dev->devnode != NULL) destroy_dev(dev->devnode); drm_ctxbitmap_cleanup(dev); if (dev->driver->driver_features & DRIVER_GEM) drm_gem_destroy(dev); if (dev->agp && dev->agp->mtrr) { int __unused retcode; retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base, dev->agp->info.ai_aperture_size, DRM_MTRR_WC); DRM_DEBUG("mtrr_del = %d", retcode); } drm_vblank_cleanup(dev); DRM_LOCK(dev); drm_lastclose(dev); DRM_UNLOCK(dev); /* Clean up PCI resources allocated by drm_bufs.c. We're not really * worried about resource consumption while the DRM is inactive (between * lastclose and firstopen or unload) because these aren't actually * taking up KVA, just keeping the PCI resource allocated. */ for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) { if (dev->pcir[i] == NULL) continue; bus_release_resource(dev->device, SYS_RES_MEMORY, dev->pcirid[i], dev->pcir[i]); dev->pcir[i] = NULL; } if (dev->agp) { free(dev->agp, DRM_MEM_AGPLISTS); dev->agp = NULL; } if (dev->driver->unload != NULL) { DRM_LOCK(dev); dev->driver->unload(dev); DRM_UNLOCK(dev); } delete_unrhdr(dev->drw_unrhdr); delete_unrhdr(dev->map_unrhdr); drm_mem_uninit(); if (pci_disable_busmaster(dev->device)) DRM_ERROR("Request to disable bus-master failed.\n"); mtx_destroy(&dev->drw_lock); mtx_destroy(&dev->vbl_lock); mtx_destroy(&dev->irq_lock); mtx_destroy(&dev->dev_lock); mtx_destroy(&dev->event_lock); sx_destroy(&dev->dev_struct_lock); } int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_version *version = data; int len; #define DRM_COPY( name, value ) \ len = strlen( value ); \ if ( len > name##_len ) len = name##_len; \ name##_len = strlen( value ); \ if ( len && name ) { \ if ( DRM_COPY_TO_USER( name, value, len ) ) \ return EFAULT; \ } version->version_major = dev->driver->major; version->version_minor = dev->driver->minor; version->version_patchlevel = dev->driver->patchlevel; DRM_COPY(version->name, dev->driver->name); DRM_COPY(version->date, dev->driver->date); DRM_COPY(version->desc, dev->driver->desc); return 0; } int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) { struct drm_device *dev; int retcode; dev = kdev->si_drv1; if (dev == NULL) return (ENXIO); DRM_DEBUG("open_count = %d\n", dev->open_count); retcode = drm_open_helper(kdev, flags, fmt, p, dev); if (retcode == 0) { atomic_inc(&dev->counts[_DRM_STAT_OPENS]); DRM_LOCK(dev); mtx_lock(&Giant); device_busy(dev->device); mtx_unlock(&Giant); if (!dev->open_count++) retcode = drm_firstopen(dev); DRM_UNLOCK(dev); } return (retcode); } void drm_close(void *data) { struct drm_file *file_priv = data; struct drm_device *dev = file_priv->dev; int retcode = 
0; DRM_DEBUG("open_count = %d\n", dev->open_count); DRM_LOCK(dev); if (dev->driver->preclose != NULL) dev->driver->preclose(dev, file_priv); /* ======================================================== * Begin inline drm_release */ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", DRM_CURRENTPID, (long)dev->device, dev->open_count); if (dev->driver->driver_features & DRIVER_GEM) drm_gem_release(dev, file_priv); if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && dev->lock.file_priv == file_priv) { DRM_DEBUG("Process %d dead, freeing lock for context %d\n", DRM_CURRENTPID, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); if (dev->driver->reclaim_buffers_locked != NULL) dev->driver->reclaim_buffers_locked(dev, file_priv); drm_lock_free(&dev->lock, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); /* FIXME: may require heavy-handed reset of hardware at this point, possibly processed via a callback to the X server. */ } else if (dev->driver->reclaim_buffers_locked != NULL && dev->lock.hw_lock != NULL) { /* The lock is required to reclaim buffers */ for (;;) { if (!dev->lock.hw_lock) { /* Device has been unregistered */ retcode = EINTR; break; } if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) { dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ } /* Contention */ retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue, PCATCH, "drmlk2", 0); if (retcode) break; } if (retcode == 0) { dev->driver->reclaim_buffers_locked(dev, file_priv); drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT); } } if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !dev->driver->reclaim_buffers_locked) drm_reclaim_buffers(dev, file_priv); funsetown(&dev->buf_sigio); seldrain(&file_priv->event_poll); if (dev->driver->postclose != NULL) dev->driver->postclose(dev, file_priv); TAILQ_REMOVE(&dev->files, file_priv, link); free(file_priv, DRM_MEM_FILES); /* ======================================================== * End inline drm_release */ atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); mtx_lock(&Giant); device_unbusy(dev->device); mtx_unlock(&Giant); if (--dev->open_count == 0) { retcode = drm_lastclose(dev); } DRM_UNLOCK(dev); } extern drm_ioctl_desc_t drm_compat_ioctls[]; /* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, DRM_STRUCTPROC *p) { struct drm_device *dev = drm_get_device_from_kdev(kdev); int retcode = 0; drm_ioctl_desc_t *ioctl; int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv); int nr = DRM_IOCTL_NR(cmd); int is_driver_ioctl = 0; struct drm_file *file_priv; retcode = devfs_get_cdevpriv((void **)&file_priv); if (retcode != 0) { DRM_ERROR("can't find authenticator\n"); return EINVAL; } atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n", DRM_CURRENTPID, cmd, nr, (long)dev->device, file_priv->authenticated); switch (cmd) { case FIONBIO: case FIOASYNC: return 0; case FIOSETOWN: return fsetown(*(int *)data, &dev->buf_sigio); case FIOGETOWN: *(int *) data = fgetown(&dev->buf_sigio); return 0; } if (IOCGROUP(cmd) != DRM_IOCTL_BASE) { DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd)); return EINVAL; } #ifdef COMPAT_FREEBSD32 /* * Called whenever a 32-bit process running under a 64-bit * kernel performs an ioctl on /dev/drm. 
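/*
 * [Editor's sketch, user-space C] How the dispatcher above picks a command
 * word apart: the low byte is the ioctl number (what DRM_IOCTL_NR()
 * extracts), bits 8-15 are the group (which must equal DRM_IOCTL_BASE,
 * 'd'), and the high bits carry the argument size and direction.  Only
 * the standard <sys/ioccom.h> macros are used; the command number 0x23
 * is a made-up example, not a real DRM ioctl.
 */
#include <sys/ioccom.h>
#include <stdio.h>

int
main(void)
{
	unsigned long cmd = _IOWR('d', 0x23, int);	/* hypothetical DRM ioctl */

	printf("group 0x%02x nr 0x%02x parm len %lu in %d out %d\n",
	    (unsigned)IOCGROUP(cmd),		/* must match DRM_IOCTL_BASE ('d') */
	    (unsigned)(cmd & 0xff),		/* what DRM_IOCTL_NR() yields */
	    (unsigned long)IOCPARM_LEN(cmd),	/* size of the copied argument */
	    (cmd & IOC_IN) != 0, (cmd & IOC_OUT) != 0);
	return (0);
}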
*/ if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL) /* * Assume that ioctls without an explicit compat * routine will just work. This may not always be a * good assumption, but it's better than always * failing. */ ioctl = &drm_compat_ioctls[nr]; else #endif ioctl = &drm_ioctls[nr]; /* It's not a core DRM ioctl, try driver-specific. */ if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) { /* The array entries begin at DRM_COMMAND_BASE ioctl nr */ nr -= DRM_COMMAND_BASE; if (nr >= dev->driver->max_ioctl) { DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n", nr, dev->driver->max_ioctl); return EINVAL; } #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32) && nr < *dev->driver->compat_ioctls_nr && dev->driver->compat_ioctls[nr].func != NULL) ioctl = &dev->driver->compat_ioctls[nr]; else #endif ioctl = &dev->driver->ioctls[nr]; is_driver_ioctl = 1; } func = ioctl->func; if (func == NULL) { DRM_DEBUG("no function\n"); return EINVAL; } if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) || ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || ((ioctl->flags & DRM_MASTER) && !file_priv->master)) return EACCES; if (is_driver_ioctl) { if ((ioctl->flags & DRM_UNLOCKED) == 0) DRM_LOCK(dev); /* shared code returns -errno */ retcode = -func(dev, data, file_priv); if ((ioctl->flags & DRM_UNLOCKED) == 0) DRM_UNLOCK(dev); } else { retcode = func(dev, data, file_priv); } if (retcode != 0) DRM_DEBUG(" returning %d\n", retcode); if (retcode != 0 && (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) { printf( "pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n", DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device, file_priv->authenticated, retcode); } return retcode; } drm_local_map_t *drm_getsarea(struct drm_device *dev) { drm_local_map_t *map; DRM_LOCK_ASSERT(dev); TAILQ_FOREACH(map, &dev->maplist, link) { if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK)) return map; } return NULL; } int drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top) { struct sysctl_oid *oid; snprintf(dev->busid_str, sizeof(dev->busid_str), "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid", CTLFLAG_RD, dev->busid_str, 0, NULL); if (oid == NULL) return (ENOMEM); dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0; oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL); if (oid == NULL) return (ENOMEM); return (0); } static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot) { struct drm_device *dev; dev = drm_get_device_from_kdev(kdev); if (dev->drm_ttm_bdev != NULL) { return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size, obj_res, nprot)); } else if ((dev->driver->driver_features & DRIVER_GEM) != 0) { return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot)); } else { return (ENODEV); } } #if DRM_LINUX #include MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1); #define LINUX_IOCTL_DRM_MIN 0x6400 #define LINUX_IOCTL_DRM_MAX 0x64ff static linux_ioctl_function_t drm_linux_ioctl; static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl, LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX}; /* The bits for in/out are switched on Linux */ #define LINUX_IOC_IN IOC_OUT #define LINUX_IOC_OUT IOC_IN static int drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args) { int 
error; int cmd = args->cmd; args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT); if (cmd & LINUX_IOC_IN) args->cmd |= IOC_IN; if (cmd & LINUX_IOC_OUT) args->cmd |= IOC_OUT; error = ioctl(p, (struct ioctl_args *)args); return error; } #endif /* DRM_LINUX */ static int drm_core_init(void *arg) { drm_global_init(); #if DRM_LINUX linux_ioctl_register_handler(&drm_handler); #endif /* DRM_LINUX */ DRM_INFO("Initialized %s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); return 0; } static void drm_core_exit(void *arg) { #if DRM_LINUX linux_ioctl_unregister_handler(&drm_handler); #endif /* DRM_LINUX */ drm_global_release(); } SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_core_init, NULL); SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_core_exit, NULL); +static bool +dmi_found(const struct dmi_system_id *dsi) +{ + char *hw_vendor, *hw_prod; + int i, slot; + bool res; + + hw_vendor = kern_getenv("smbios.planar.maker"); + hw_prod = kern_getenv("smbios.planar.product"); + res = true; + for (i = 0; i < nitems(dsi->matches); i++) { + slot = dsi->matches[i].slot; + switch (slot) { + case DMI_NONE: + break; + case DMI_SYS_VENDOR: + case DMI_BOARD_VENDOR: + if (hw_vendor != NULL && + !strcmp(hw_vendor, dsi->matches[i].substr)) { + break; + } else { + res = false; + goto out; + } + case DMI_PRODUCT_NAME: + case DMI_BOARD_NAME: + if (hw_prod != NULL && + !strcmp(hw_prod, dsi->matches[i].substr)) { + break; + } else { + res = false; + goto out; + } + default: + res = false; + goto out; + } + } +out: + freeenv(hw_vendor); + freeenv(hw_prod); + + return (res); +} + bool dmi_check_system(const struct dmi_system_id *sysid) { + const struct dmi_system_id *dsi; + bool res; - /* XXXKIB */ - return (false); + for (res = false, dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) { + if (dmi_found(dsi)) { + res = true; + if (dsi->callback != NULL && dsi->callback(dsi)) + break; + } + } + return (res); } Index: head/sys/dev/drm2/drm_edid.c =================================================================== --- head/sys/dev/drm2/drm_edid.c (revision 277486) +++ head/sys/dev/drm2/drm_edid.c (revision 277487) @@ -1,1794 +1,1918 @@ /* * Copyright (c) 2006 Luc Verhaegen (quirks list) * Copyright (c) 2007-2008 Intel Corporation * Jesse Barnes * Copyright 2010 Red Hat, Inc. * * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from * FB layer. * Copyright (C) 2006 Dennis Munsie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
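/*
 * [Editor's sketch] How a caller would feed the dmi_check_system()
 * implementation added above.  Matching is an exact strcmp() of the
 * smbios.planar.maker / smbios.planar.product kenv strings against each
 * match entry.  The vendor/product strings and the "example_" names are
 * invented; only the fields dmi_found() actually reads (.callback,
 * .matches[].slot, .matches[].substr) are relied on, and the callback is
 * assumed to follow the Linux convention of returning non-zero to stop
 * the table walk.
 */
static int example_quirk_active;

static int
example_dmi_callback(const struct dmi_system_id *id)
{
	example_quirk_active = 1;
	return (1);		/* non-zero: stop walking the table */
}

static const struct dmi_system_id example_dmi_table[] = {
	{
		.callback = example_dmi_callback,
		.matches = {
			{ .slot = DMI_SYS_VENDOR,   .substr = "Example Corp" },
			{ .slot = DMI_PRODUCT_NAME, .substr = "Example Model 1" },
		},
	},
	{ }	/* terminator: matches[0].slot == 0 ends the walk above */
};

/* During attach:  if (dmi_check_system(example_dmi_table)) apply the quirk. */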
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include "iicbus_if.h" #define version_greater(edid, maj, min) \ (((edid)->version > (maj)) || \ ((edid)->version == (maj) && (edid)->revision > (min))) #define EDID_EST_TIMINGS 16 #define EDID_STD_TIMINGS 8 #define EDID_DETAILED_TIMINGS 4 /* * EDID blocks out in the wild have a variety of bugs, try to collect * them here (note that userspace may work around broken monitors first, * but fixes should make their way here so that the kernel "just works" * on as many displays as possible). */ /* First detailed mode wrong, use largest 60Hz mode */ #define EDID_QUIRK_PREFER_LARGE_60 (1 << 0) /* Reported 135MHz pixel clock is too high, needs adjustment */ #define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1) /* Prefer the largest mode at 75 Hz */ #define EDID_QUIRK_PREFER_LARGE_75 (1 << 2) /* Detail timing is in cm not mm */ #define EDID_QUIRK_DETAILED_IN_CM (1 << 3) /* Detailed timing descriptors have bogus size values, so just take the * maximum size and use that. */ #define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4) /* Monitor forgot to set the first detailed is preferred bit. */ #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) /* use +hsync +vsync for detailed mode */ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) struct detailed_mode_closure { struct drm_connector *connector; struct edid *edid; bool preferred; u32 quirks; int modes; }; #define LEVEL_DMT 0 #define LEVEL_GTF 1 #define LEVEL_GTF2 2 #define LEVEL_CVT 3 static struct edid_quirk { - char *vendor; + char vendor[4]; int product_id; u32 quirks; } edid_quirk_list[] = { /* Acer AL1706 */ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, /* Acer F51 */ { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 }, /* Unknown Acer */ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, /* Envision Peripherals, Inc. EN-7100e */ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, /* Envision EN2028 */ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, /* Funai Electronics PM36B */ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | EDID_QUIRK_DETAILED_IN_CM }, /* LG Philips LCD LP154W01-A5 */ { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, /* Philips 107p5 CRT */ { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, /* Proview AY765C */ { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, /* Samsung SyncMaster 205BW. Note: irony */ { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP }, /* Samsung SyncMaster 22[5-6]BW */ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, }; /*** DDC fetch and block validation ***/ static const u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; /* * Sanity check the header of the base EDID block. Return 8 if the header * is perfect, down to 0 if it's totally wrong. */ int drm_edid_header_is_valid(const u8 *raw_edid) { int i, score = 0; for (i = 0; i < sizeof(edid_header); i++) if (raw_edid[i] == edid_header[i]) score++; return score; } /* * Sanity check the EDID block (base or extension). Return 0 if the block * doesn't check out, or 1 if it's valid. 
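/*
 * [Editor's sketch, user-space C] The two checks the EDID validation code
 * here rests on: drm_edid_header_is_valid() above scores the fixed 8-byte
 * header byte-by-byte, and drm_edid_block_valid() below requires the 128
 * bytes of a block to sum to 0 modulo 256.  Standalone and compilable;
 * the "example_" names are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_EDID_LENGTH 128

static const uint8_t example_header[8] =
    { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

static int
example_header_score(const uint8_t *blk)
{
	int i, score = 0;

	for (i = 0; i < 8; i++)
		if (blk[i] == example_header[i])
			score++;
	return (score);		/* 8 is perfect, >= 6 is treated as fixable above */
}

static int
example_checksum_ok(const uint8_t *blk)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < EXAMPLE_EDID_LENGTH; i++)
		sum += blk[i];
	return (sum == 0);
}

int
main(void)
{
	uint8_t blk[EXAMPLE_EDID_LENGTH] = { 0 };
	uint8_t sum = 0;
	int i;

	memcpy(blk, example_header, sizeof(example_header));
	blk[0x12] = 1;				/* EDID 1.x version byte */
	for (i = 0; i < EXAMPLE_EDID_LENGTH - 1; i++)
		sum += blk[i];
	blk[0x7f] = (uint8_t)(256 - sum);	/* checksum byte balances the block */

	printf("header score %d, checksum %s\n", example_header_score(blk),
	    example_checksum_ok(blk) ? "ok" : "bad");
	return (0);
}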
*/ static bool -drm_edid_block_valid(u8 *raw_edid) +drm_edid_block_valid(u8 *raw_edid, int block) { int i; u8 csum = 0; struct edid *edid = (struct edid *)raw_edid; - if (raw_edid[0] == 0x00) { + if (block == 0) { int score = drm_edid_header_is_valid(raw_edid); if (score == 8) ; else if (score >= 6) { DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); memcpy(raw_edid, edid_header, sizeof(edid_header)); } else { goto bad; } } for (i = 0; i < EDID_LENGTH; i++) csum += raw_edid[i]; if (csum) { DRM_DEBUG_KMS("EDID checksum is invalid, remainder is %d\n", csum); /* allow CEA to slide through, switches mangle this */ if (raw_edid[0] != 0x02) goto bad; } /* per-block-type checks */ switch (raw_edid[0]) { case 0: /* base */ if (edid->version != 1) { DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); goto bad; } if (edid->revision > 4) DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); break; default: break; } return 1; bad: if (raw_edid) { DRM_DEBUG_KMS("Raw EDID:\n"); if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) { for (i = 0; i < EDID_LENGTH; ) { printf("%02x", raw_edid[i]); i++; if (i % 16 == 0 || i == EDID_LENGTH) printf("\n"); else if (i % 8 == 0) printf(" "); else printf(" "); } } } return 0; } /** * drm_edid_is_valid - sanity check EDID data * @edid: EDID data * * Sanity-check an entire EDID record (including extensions) */ bool drm_edid_is_valid(struct edid *edid) { int i; u8 *raw = (u8 *)edid; if (!edid) return false; for (i = 0; i <= edid->extensions; i++) - if (!drm_edid_block_valid(raw + i * EDID_LENGTH)) + if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i)) return false; return true; } #define DDC_ADDR 0x50 #define DDC_SEGMENT_ADDR 0x30 /** * Get EDID information via I2C. * * \param adapter : i2c device adaptor * \param buf : EDID data buffer to be filled * \param len : EDID data buffer length * \return 0 on success or -1 on failure. * * Try to fetch EDID information by calling i2c driver function. */ static int drm_do_probe_ddc_edid(device_t adapter, unsigned char *buf, int block, int len) { unsigned char start = block * EDID_LENGTH; unsigned char segment = block >> 1; unsigned char xfers = segment ? 3 : 2; int ret, retries = 5; /* The core i2c driver will automatically retry the transfer if the * adapter reports EAGAIN. However, we find that bit-banging transfers * are susceptible to errors under a heavily loaded machine and * generate spurious NAKs and timeouts. Retrying the transfer * of the individual block a few times seems to overcome this. */ do { struct iic_msg msgs[] = { { .slave = DDC_SEGMENT_ADDR << 1, .flags = 0, .len = 1, .buf = &segment, }, { .slave = DDC_ADDR << 1, .flags = IIC_M_WR, .len = 1, .buf = &start, }, { .slave = DDC_ADDR << 1, .flags = IIC_M_RD, .len = len, .buf = buf, } }; /* * Avoid sending the segment addr to not upset non-compliant ddc * monitors. */ ret = iicbus_transfer(adapter, &msgs[3 - xfers], xfers); if (ret != 0) DRM_DEBUG_KMS("iicbus_transfer countdown %d error %d\n", retries, ret); } while (ret != 0 && --retries); return (ret == 0 ? 
0 : -1); } static bool drm_edid_is_zero(u8 *in_edid, int length) { int i; u32 *raw_edid = (u32 *)in_edid; for (i = 0; i < length / 4; i++) if (*(raw_edid + i) != 0) return false; return true; } static u8 * drm_do_get_edid(struct drm_connector *connector, device_t adapter) { int i, j = 0, valid_extensions = 0; u8 *block, *new; block = malloc(EDID_LENGTH, DRM_MEM_KMS, M_WAITOK | M_ZERO); /* base block fetch */ for (i = 0; i < 4; i++) { if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block)) + if (drm_edid_block_valid(block, 0)) break; if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { connector->null_edid_counter++; goto carp; } } if (i == 4) goto carp; /* if there's no extensions, we're done */ if (block[0x7e] == 0) return block; new = reallocf(block, (block[0x7e] + 1) * EDID_LENGTH, DRM_MEM_KMS, M_WAITOK); block = new; for (j = 1; j <= block[0x7e]; j++) { for (i = 0; i < 4; i++) { if (drm_do_probe_ddc_edid(adapter, block + (valid_extensions + 1) * EDID_LENGTH, j, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) { + if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) { valid_extensions++; break; } } if (i == 4) DRM_DEBUG_KMS("%s: Ignoring invalid EDID block %d.\n", drm_get_connector_name(connector), j); } if (valid_extensions != block[0x7e]) { block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; block[0x7e] = valid_extensions; new = reallocf(block, (valid_extensions + 1) * EDID_LENGTH, DRM_MEM_KMS, M_WAITOK); block = new; } DRM_DEBUG_KMS("got EDID from %s\n", drm_get_connector_name(connector)); return block; carp: DRM_DEBUG_KMS("%s: EDID block %d invalid.\n", drm_get_connector_name(connector), j); out: free(block, DRM_MEM_KMS); return NULL; } /** * Probe DDC presence. * * \param adapter : i2c device adaptor * \return 1 on success */ static bool drm_probe_ddc(device_t adapter) { unsigned char out; return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); } /** * drm_get_edid - get EDID data, if available * @connector: connector we're probing * @adapter: i2c adapter to use for DDC * * Poke the given i2c channel to grab EDID data if possible. If found, * attach it to the connector. * * Return edid data or NULL if we couldn't find any. */ struct edid *drm_get_edid(struct drm_connector *connector, device_t adapter) { struct edid *edid = NULL; if (drm_probe_ddc(adapter)) edid = (struct edid *)drm_do_get_edid(connector, adapter); connector->display_info.raw_edid = (char *)edid; return edid; } /*** EDID parsing ***/ /** * edid_vendor - match a string against EDID's obfuscated vendor field * @edid: EDID to match * @vendor: vendor string * * Returns true if @vendor is in @edid, false otherwise */ static bool edid_vendor(struct edid *edid, char *vendor) { char edid_vendor[3]; edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@'; return !strncmp(edid_vendor, vendor, 3); } /** * edid_get_quirks - return quirk flags for a given EDID * @edid: EDID to process * * This tells subsequent routines what fixes they need to apply. 
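/*
 * [Editor's sketch, user-space C] The bit-unpacking edid_vendor() above
 * performs: three 5-bit letters, each offset from '@', packed into the
 * two mfg_id bytes.  The byte pair 0x04 0x72 decodes to "ACR", the Acer
 * entry seen in the quirk table above.  Standalone and compilable.
 */
#include <stdint.h>
#include <stdio.h>

static void
example_decode_pnp_id(const uint8_t mfg_id[2], char out[4])
{
	out[0] = ((mfg_id[0] & 0x7c) >> 2) + '@';
	out[1] = (((mfg_id[0] & 0x03) << 3) | ((mfg_id[1] & 0xe0) >> 5)) + '@';
	out[2] = (mfg_id[1] & 0x1f) + '@';
	out[3] = '\0';
}

int
main(void)
{
	const uint8_t acer[2] = { 0x04, 0x72 };
	char vendor[4];

	example_decode_pnp_id(acer, vendor);
	printf("%s\n", vendor);		/* prints "ACR" */
	return (0);
}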
*/ static u32 edid_get_quirks(struct edid *edid) { struct edid_quirk *quirk; int i; for (i = 0; i < DRM_ARRAY_SIZE(edid_quirk_list); i++) { quirk = &edid_quirk_list[i]; if (edid_vendor(edid, quirk->vendor) && (EDID_PRODUCT_ID(edid) == quirk->product_id)) return quirk->quirks; } return 0; } #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay) #define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh)) /** * edid_fixup_preferred - set preferred modes based on quirk list * @connector: has mode list to fix up * @quirks: quirks list * * Walk the mode list for @connector, clearing the preferred status * on existing modes and setting it anew for the right mode ala @quirks. */ static void edid_fixup_preferred(struct drm_connector *connector, u32 quirks) { struct drm_display_mode *t, *cur_mode, *preferred_mode; int target_refresh = 0; if (list_empty(&connector->probed_modes)) return; if (quirks & EDID_QUIRK_PREFER_LARGE_60) target_refresh = 60; if (quirks & EDID_QUIRK_PREFER_LARGE_75) target_refresh = 75; preferred_mode = list_first_entry(&connector->probed_modes, struct drm_display_mode, head); list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) { cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED; if (cur_mode == preferred_mode) continue; /* Largest mode is preferred */ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) preferred_mode = cur_mode; /* At a given size, try to get closest to target refresh */ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && MODE_REFRESH_DIFF(cur_mode, target_refresh) < MODE_REFRESH_DIFF(preferred_mode, target_refresh)) { preferred_mode = cur_mode; } } preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; } +static bool +mode_is_rb(const struct drm_display_mode *mode) +{ + return (mode->htotal - mode->hdisplay == 160) && + (mode->hsync_end - mode->hdisplay == 80) && + (mode->hsync_end - mode->hsync_start == 32) && + (mode->vsync_start - mode->vdisplay == 3); +} + +/* + * drm_mode_find_dmt - Create a copy of a mode if present in DMT + * @dev: Device to duplicate against + * @hsize: Mode width + * @vsize: Mode height + * @fresh: Mode refresh rate + * @rb: Mode reduced-blanking-ness + * + * Walk the DMT mode list looking for a match for the given parameters. + * Return a newly allocated copy of the mode, or NULL if not found. 
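/*
 * [Editor's sketch, user-space C] The CVT reduced-blanking signature that
 * mode_is_rb() above tests for, demonstrated on the standard DMT
 * 1920x1200@60 reduced-blanking timing (154 MHz dot clock).  A minimal
 * stand-in struct replaces struct drm_display_mode; the timing numbers
 * are quoted from the DMT table and should be checked against it.
 */
#include <stdbool.h>
#include <stdio.h>

struct example_mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start;
};

static bool
example_is_rb(const struct example_mode *m)
{
	return (m->htotal - m->hdisplay == 160) &&
	    (m->hsync_end - m->hdisplay == 80) &&
	    (m->hsync_end - m->hsync_start == 32) &&
	    (m->vsync_start - m->vdisplay == 3);
}

int
main(void)
{
	/* 1920x1200@60 CVT-RB: H 1920 1968 2000 2080, V 1200 1203 1209 1235 */
	struct example_mode m = { 1920, 1968, 2000, 2080, 1200, 1203 };

	printf("reduced blanking: %s\n", example_is_rb(&m) ? "yes" : "no");
	return (0);
}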
+ */ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, - int hsize, int vsize, int fresh) + int hsize, int vsize, int fresh, + bool rb) { - struct drm_display_mode *mode = NULL; int i; for (i = 0; i < drm_num_dmt_modes; i++) { struct drm_display_mode *ptr = &drm_dmt_modes[i]; - if (hsize == ptr->hdisplay && - vsize == ptr->vdisplay && - fresh == drm_mode_vrefresh(ptr)) { - /* get the expected default mode */ - mode = drm_mode_duplicate(dev, ptr); - break; - } + if (hsize != ptr->hdisplay) + continue; + if (vsize != ptr->vdisplay) + continue; + if (fresh != drm_mode_vrefresh(ptr)) + continue; + if (rb != mode_is_rb(ptr)) + continue; + + return drm_mode_duplicate(dev, ptr); } - return mode; + + return NULL; } typedef void detailed_cb(struct detailed_timing *timing, void *closure); static void cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) { int i, n = 0; u8 rev = ext[0x01], d = ext[0x02]; u8 *det_base = ext + d; switch (rev) { case 0: /* can't happen */ return; case 1: /* have to infer how many blocks we have, check pixel clock */ for (i = 0; i < 6; i++) if (det_base[18*i] || det_base[18*i+1]) n++; break; default: /* explicit count */ n = min(ext[0x03] & 0x0f, 6); break; } for (i = 0; i < n; i++) cb((struct detailed_timing *)(det_base + 18 * i), closure); } static void vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) { unsigned int i, n = min((int)ext[0x02], 6); u8 *det_base = ext + 5; if (ext[0x01] != 1) return; /* unknown version */ for (i = 0; i < n; i++) cb((struct detailed_timing *)(det_base + 18 * i), closure); } static void drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) { int i; struct edid *edid = (struct edid *)raw_edid; if (edid == NULL) return; for (i = 0; i < EDID_DETAILED_TIMINGS; i++) cb(&(edid->detailed_timings[i]), closure); for (i = 1; i <= raw_edid[0x7e]; i++) { u8 *ext = raw_edid + (i * EDID_LENGTH); switch (*ext) { case CEA_EXT: cea_for_each_detailed_block(ext, cb, closure); break; case VTB_EXT: vtb_for_each_detailed_block(ext, cb, closure); break; default: break; } } } static void is_rb(struct detailed_timing *t, void *data) { u8 *r = (u8 *)t; if (r[3] == EDID_DETAIL_MONITOR_RANGE) if (r[15] & 0x10) *(bool *)data = true; } /* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ static bool drm_monitor_supports_rb(struct edid *edid) { if (edid->revision >= 4) { bool ret; drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); return ret; } return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); } static void find_gtf2(struct detailed_timing *t, void *data) { u8 *r = (u8 *)t; if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) *(u8 **)data = r; } /* Secondary GTF curve kicks in above some break frequency */ static int drm_gtf2_hbreak(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? (r[12] * 2) : 0; } static int drm_gtf2_2c(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? r[13] : 0; } static int drm_gtf2_m(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? (r[15] << 8) + r[14] : 0; } static int drm_gtf2_k(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? r[16] : 0; } static int drm_gtf2_2j(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? r[17] : 0; } /** * standard_timing_level - get std. 
timing level(CVT/GTF/DMT) * @edid: EDID block to scan */ static int standard_timing_level(struct edid *edid) { if (edid->revision >= 2) { if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) return LEVEL_CVT; if (drm_gtf2_hbreak(edid)) return LEVEL_GTF2; return LEVEL_GTF; } return LEVEL_DMT; } /* * 0 is reserved. The spec says 0x01 fill for unused timings. Some old * monitors fill with ascii space (0x20) instead. */ static int bad_std_timing(u8 a, u8 b) { return (a == 0x00 && b == 0x00) || (a == 0x01 && b == 0x01) || (a == 0x20 && b == 0x20); } /** * drm_mode_std - convert standard mode info (width, height, refresh) into mode * @t: standard timing params * @timing_level: standard timing level * * Take the standard timing params (in this case width, aspect, and refresh) * and convert them into a real mode using CVT/GTF/DMT. */ static struct drm_display_mode * drm_mode_std(struct drm_connector *connector, struct edid *edid, struct std_timing *t, int revision) { struct drm_device *dev = connector->dev; struct drm_display_mode *m, *mode = NULL; int hsize, vsize; int vrefresh_rate; unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) >> EDID_TIMING_ASPECT_SHIFT; unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) >> EDID_TIMING_VFREQ_SHIFT; int timing_level = standard_timing_level(edid); if (bad_std_timing(t->hsize, t->vfreq_aspect)) return NULL; /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ hsize = t->hsize * 8 + 248; /* vrefresh_rate = vfreq + 60 */ vrefresh_rate = vfreq + 60; /* the vdisplay is calculated based on the aspect ratio */ if (aspect_ratio == 0) { if (revision < 3) vsize = hsize; else vsize = (hsize * 10) / 16; } else if (aspect_ratio == 1) vsize = (hsize * 3) / 4; else if (aspect_ratio == 2) vsize = (hsize * 4) / 5; else vsize = (hsize * 9) / 16; /* HDTV hack, part 1 */ if (vrefresh_rate == 60 && ((hsize == 1360 && vsize == 765) || (hsize == 1368 && vsize == 769))) { hsize = 1366; vsize = 768; } /* * If this connector already has a mode for this size and refresh * rate (because it came from detailed or CVT info), use that * instead. This way we don't have to guess at interlace or * reduced blanking. */ list_for_each_entry(m, &connector->probed_modes, head) if (m->hdisplay == hsize && m->vdisplay == vsize && drm_mode_vrefresh(m) == vrefresh_rate) return NULL; /* HDTV hack, part 2 */ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, false); mode->hdisplay = 1366; mode->hsync_start = mode->hsync_start - 1; mode->hsync_end = mode->hsync_end - 1; return mode; } /* check whether it can be found in default mode table */ - mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate); + if (drm_monitor_supports_rb(edid)) { + mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, + true); + if (mode) + return mode; + } + mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false); if (mode) return mode; + /* okay, generate it */ switch (timing_level) { case LEVEL_DMT: break; case LEVEL_GTF: mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); break; case LEVEL_GTF2: /* * This is potentially wrong if there's ever a monitor with * more than one ranges section, each claiming a different * secondary GTF curve. Please don't do that. 
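The standard-timing arithmetic above is easier to follow with concrete numbers. The sketch below redoes the drm_mode_std() math for EDID 1.3 and newer in plain C (hdisplay = hsize * 8 + 248, refresh = low six bits + 60, vdisplay from the aspect-ratio bits using the same 00=16:10, 01=4:3, 10=5:4, 11=16:9 mapping noted in drm_edid.h); the two descriptor byte pairs in main() are made-up samples.

#include <stdio.h>
#include <stdint.h>

/* Decode an EDID standard timing descriptor (two bytes) the same way
 * drm_mode_std() does for EDID revision >= 3. */
static void decode_std_timing(uint8_t hsize_byte, uint8_t vfreq_aspect)
{
    int hsize = hsize_byte * 8 + 248;          /* horizontal pixels */
    int vrefresh = (vfreq_aspect & 0x3f) + 60; /* low 6 bits + 60 Hz */
    int aspect = vfreq_aspect >> 6;            /* top 2 bits */
    int vsize;

    switch (aspect) {
    case 0:  vsize = hsize * 10 / 16; break;   /* 16:10 */
    case 1:  vsize = hsize * 3 / 4;   break;   /* 4:3   */
    case 2:  vsize = hsize * 4 / 5;   break;   /* 5:4   */
    default: vsize = hsize * 9 / 16;  break;   /* 16:9  */
    }

    printf("%dx%d@%dHz\n", hsize, vsize, vrefresh);
}

int main(void)
{
    decode_std_timing(0x81, 0xc0); /* 1280x720@60 (16:9) */
    decode_std_timing(0xa9, 0x40); /* 1600x1200@60 (4:3) */
    return 0;
}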
*/ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); + if (!mode) + return NULL; if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { free(mode, DRM_MEM_KMS); mode = drm_gtf_mode_complex(dev, hsize, vsize, vrefresh_rate, 0, 0, drm_gtf2_m(edid), drm_gtf2_2c(edid), drm_gtf2_k(edid), drm_gtf2_2j(edid)); } break; case LEVEL_CVT: mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, false); break; } return mode; } /* * EDID is delightfully ambiguous about how interlaced modes are to be * encoded. Our internal representation is of frame height, but some * HDTV detailed timings are encoded as field height. * * The format list here is from CEA, in frame size. Technically we * should be checking refresh rate too. Whatever. */ static void drm_mode_do_interlace_quirk(struct drm_display_mode *mode, struct detailed_pixel_timing *pt) { int i; static const struct { int w, h; } cea_interlaced[] = { { 1920, 1080 }, { 720, 480 }, { 1440, 480 }, { 2880, 480 }, { 720, 576 }, { 1440, 576 }, { 2880, 576 }, }; if (!(pt->misc & DRM_EDID_PT_INTERLACED)) return; for (i = 0; i < DRM_ARRAY_SIZE(cea_interlaced); i++) { if ((mode->hdisplay == cea_interlaced[i].w) && (mode->vdisplay == cea_interlaced[i].h / 2)) { mode->vdisplay *= 2; mode->vsync_start *= 2; mode->vsync_end *= 2; mode->vtotal *= 2; mode->vtotal |= 1; } } mode->flags |= DRM_MODE_FLAG_INTERLACE; } /** * drm_mode_detailed - create a new mode from an EDID detailed timing section * @dev: DRM device (needed to create new mode) * @edid: EDID block * @timing: EDID detailed timing info * @quirks: quirks to apply * * An EDID detailed timing block contains enough info for us to create and * return a new struct drm_display_mode. */ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, struct edid *edid, struct detailed_timing *timing, u32 quirks) { struct drm_display_mode *mode; struct detailed_pixel_timing *pt = &timing->data.pixel_data; unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo; unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); /* ignore tiny modes */ if (hactive < 64 || vactive < 64) return NULL; if (pt->misc & DRM_EDID_PT_STEREO) { printf("stereo mode not supported\n"); return NULL; } if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { printf("composite sync not supported\n"); } /* it is incorrect if hsync/vsync width is zero */ if (!hsync_pulse_width || !vsync_pulse_width) { DRM_DEBUG_KMS("Incorrect Detailed timing. 
" "Wrong Hsync/Vsync pulse width\n"); return NULL; } mode = drm_mode_create(dev); if (!mode) return NULL; mode->type = DRM_MODE_TYPE_DRIVER; if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) timing->pixel_clock = htole16(1088); mode->clock = le16toh(timing->pixel_clock) * 10; mode->hdisplay = hactive; mode->hsync_start = mode->hdisplay + hsync_offset; mode->hsync_end = mode->hsync_start + hsync_pulse_width; mode->htotal = mode->hdisplay + hblank; mode->vdisplay = vactive; mode->vsync_start = mode->vdisplay + vsync_offset; mode->vsync_end = mode->vsync_start + vsync_pulse_width; mode->vtotal = mode->vdisplay + vblank; /* Some EDIDs have bogus h/vtotal values */ if (mode->hsync_end > mode->htotal) mode->htotal = mode->hsync_end + 1; if (mode->vsync_end > mode->vtotal) mode->vtotal = mode->vsync_end + 1; drm_mode_do_interlace_quirk(mode, pt); drm_mode_set_name(mode); if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; } mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; if (quirks & EDID_QUIRK_DETAILED_IN_CM) { mode->width_mm *= 10; mode->height_mm *= 10; } if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) { mode->width_mm = edid->width_cm * 10; mode->height_mm = edid->height_cm * 10; } return mode; } static bool -mode_is_rb(const struct drm_display_mode *mode) -{ - return (mode->htotal - mode->hdisplay == 160) && - (mode->hsync_end - mode->hdisplay == 80) && - (mode->hsync_end - mode->hsync_start == 32) && - (mode->vsync_start - mode->vdisplay == 3); -} - -static bool mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) { int hsync, hmin, hmax; hmin = t[7]; if (edid->revision >= 4) hmin += ((t[4] & 0x04) ? 255 : 0); hmax = t[8]; if (edid->revision >= 4) hmax += ((t[4] & 0x08) ? 255 : 0); hsync = drm_mode_hsync(mode); return (hsync <= hmax && hsync >= hmin); } static bool mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) { int vsync, vmin, vmax; vmin = t[5]; if (edid->revision >= 4) vmin += ((t[4] & 0x01) ? 255 : 0); vmax = t[6]; if (edid->revision >= 4) vmax += ((t[4] & 0x02) ? 255 : 0); vsync = drm_mode_vrefresh(mode); return (vsync <= vmax && vsync >= vmin); } static u32 range_pixel_clock(struct edid *edid, u8 *t) { /* unspecified */ if (t[9] == 0 || t[9] == 255) return 0; /* 1.4 with CVT support gives us real precision, yay */ if (edid->revision >= 4 && t[10] == 0x04) return (t[9] * 10000) - ((t[12] >> 2) * 250); /* 1.3 is pathetic, so fuzz up a bit */ return t[9] * 10000 + 5001; } static bool mode_in_range(struct drm_display_mode *mode, struct edid *edid, struct detailed_timing *timing) { u32 max_clock; u8 *t = (u8 *)timing; if (!mode_in_hsync_range(mode, edid, t)) return false; if (!mode_in_vsync_range(mode, edid, t)) return false; if ((max_clock = range_pixel_clock(edid, t))) if (mode->clock > max_clock) return false; /* 1.4 max horizontal check */ if (edid->revision >= 4 && t[10] == 0x04) if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3)))) return false; if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid)) return false; return true; } -/* - * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will - * need to account for them. 
- */ static int -drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, +drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, struct detailed_timing *timing) { int i, modes = 0; struct drm_display_mode *newmode; struct drm_device *dev = connector->dev; for (i = 0; i < drm_num_dmt_modes; i++) { if (mode_in_range(drm_dmt_modes + i, edid, timing)) { newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; } } } return modes; } +/* fix up 1366x768 mode from 1368x768; + * GFT/CVT can't express 1366 width which isn't dividable by 8 + */ +static void fixup_mode_1366x768(struct drm_display_mode *mode) +{ + if (mode->hdisplay == 1368 && mode->vdisplay == 768) { + mode->hdisplay = 1366; + mode->hsync_start--; + mode->hsync_end--; + drm_mode_set_name(mode); + } +} + +static int +drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, + struct detailed_timing *timing) +{ + int i, modes = 0; + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; + + for (i = 0; i < num_extra_modes; i++) { + const struct minimode *m = &extra_modes[i]; + newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); + if (!newmode) + return modes; + + fixup_mode_1366x768(newmode); + if (!mode_in_range(newmode, edid, timing)) { + drm_mode_destroy(dev, newmode); + continue; + } + + drm_mode_probed_add(connector, newmode); + modes++; + } + + return modes; +} + +static int +drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid, + struct detailed_timing *timing) +{ + int i, modes = 0; + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; + bool rb = drm_monitor_supports_rb(edid); + + for (i = 0; i < num_extra_modes; i++) { + const struct minimode *m = &extra_modes[i]; + newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); + if (!newmode) + return modes; + + fixup_mode_1366x768(newmode); + if (!mode_in_range(newmode, edid, timing)) { + drm_mode_destroy(dev, newmode); + continue; + } + + drm_mode_probed_add(connector, newmode); + modes++; + } + + return modes; +} + static void do_inferred_modes(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; - int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); + struct detailed_data_monitor_range *range = &data->data.range; - if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE) + if (data->type != EDID_DETAIL_MONITOR_RANGE) + return; + + closure->modes += drm_dmt_modes_for_range(closure->connector, + closure->edid, + timing); + + if (!version_greater(closure->edid, 1, 1)) + return; /* GTF not defined yet */ + + switch (range->flags) { + case 0x02: /* secondary gtf, XXX could do more */ + case 0x00: /* default gtf */ closure->modes += drm_gtf_modes_for_range(closure->connector, closure->edid, timing); + break; + case 0x04: /* cvt, only in 1.4+ */ + if (!version_greater(closure->edid, 1, 3)) + break; + + closure->modes += drm_cvt_modes_for_range(closure->connector, + closure->edid, + timing); + break; + case 0x01: /* just the ranges, no formula */ + default: + break; + } } static int add_inferred_modes(struct drm_connector *connector, struct edid *edid) { struct detailed_mode_closure closure = { connector, edid, 0, 0, 0 }; if (version_greater(edid, 1, 0)) drm_for_each_detailed_block((u8 *)edid, do_inferred_modes, &closure); return closure.modes; } static int drm_est3_modes(struct drm_connector 
*connector, struct detailed_timing *timing) { int i, j, m, modes = 0; struct drm_display_mode *mode; u8 *est = ((u8 *)timing) + 5; for (i = 0; i < 6; i++) { for (j = 7; j > 0; j--) { m = (i * 8) + (7 - j); if (m >= DRM_ARRAY_SIZE(est3_modes)) break; if (est[i] & (1 << j)) { mode = drm_mode_find_dmt(connector->dev, est3_modes[m].w, est3_modes[m].h, - est3_modes[m].r - /*, est3_modes[m].rb */); + est3_modes[m].r, + est3_modes[m].rb); if (mode) { drm_mode_probed_add(connector, mode); modes++; } } } } return modes; } static void do_established_modes(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; if (data->type == EDID_DETAIL_EST_TIMINGS) closure->modes += drm_est3_modes(closure->connector, timing); } /** * add_established_modes - get est. modes from EDID and add them * @edid: EDID block to scan * * Each EDID block contains a bitmap of the supported "established modes" list * (defined above). Tease them out and add them to the global modes list. */ static int add_established_modes(struct drm_connector *connector, struct edid *edid) { struct drm_device *dev = connector->dev; unsigned long est_bits = edid->established_timings.t1 | (edid->established_timings.t2 << 8) | ((edid->established_timings.mfg_rsvd & 0x80) << 9); int i, modes = 0; struct detailed_mode_closure closure = { connector, edid, 0, 0, 0 }; for (i = 0; i <= EDID_EST_TIMINGS; i++) { if (est_bits & (1<data.other_data; struct drm_connector *connector = closure->connector; struct edid *edid = closure->edid; if (data->type == EDID_DETAIL_STD_MODES) { int i; for (i = 0; i < 6; i++) { struct std_timing *std; struct drm_display_mode *newmode; std = &data->data.timings[i]; newmode = drm_mode_std(connector, edid, std, edid->revision); if (newmode) { drm_mode_probed_add(connector, newmode); closure->modes++; } } } } /** * add_standard_modes - get std. modes from EDID and add them * @edid: EDID block to scan * * Standard modes can be calculated using the appropriate standard (DMT, * GTF or CVT. Grab them from @edid and add them to the list. 
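The established-timings word that add_established_modes() assembles is a 17-bit bitmap spread over two bytes plus one manufacturer-reserved bit. The sketch below builds the word the same way and walks the set bits, assuming (as in the upstream driver) that bit i indexes edid_est_modes[i]; the name strings merely label that table in order and the sample bytes are invented.

#include <stdio.h>
#include <stdint.h>

static const char *est_names[17] = {
    "800x600@60", "800x600@56", "640x480@75", "640x480@72",
    "640x480@67", "640x480@60", "720x400@88", "720x400@70",
    "1280x1024@75", "1024x768@75", "1024x768@70", "1024x768@60",
    "1024x768@43i", "832x624@75", "800x600@75", "800x600@72",
    "1152x864@75",
};

int main(void)
{
    uint8_t t1 = 0x21, t2 = 0x08, mfg_rsvd = 0x00;  /* sample EDID bytes */
    unsigned long est_bits = t1 | (t2 << 8) | ((mfg_rsvd & 0x80) << 9);
    int i;

    for (i = 0; i < 17; i++)
        if (est_bits & (1 << i))
            printf("supported: %s\n", est_names[i]);
    return 0;
}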
*/ static int add_standard_modes(struct drm_connector *connector, struct edid *edid) { int i, modes = 0; struct detailed_mode_closure closure = { connector, edid, 0, 0, 0 }; for (i = 0; i < EDID_STD_TIMINGS; i++) { struct drm_display_mode *newmode; newmode = drm_mode_std(connector, edid, &edid->standard_timings[i], edid->revision); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; } } if (version_greater(edid, 1, 0)) drm_for_each_detailed_block((u8 *)edid, do_standard_modes, &closure); /* XXX should also look for standard codes in VTB blocks */ return modes + closure.modes; } static int drm_cvt_modes(struct drm_connector *connector, struct detailed_timing *timing) { int i, j, modes = 0; struct drm_display_mode *newmode; struct drm_device *dev = connector->dev; struct cvt_timing *cvt; const int rates[] = { 60, 85, 75, 60, 50 }; const u8 empty[3] = { 0, 0, 0 }; for (i = 0; i < 4; i++) { int width = 0, height; cvt = &(timing->data.other_data.data.cvt[i]); if (!memcmp(cvt->code, empty, 3)) continue; height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2; switch (cvt->code[1] & 0x0c) { case 0x00: width = height * 4 / 3; break; case 0x04: width = height * 16 / 9; break; case 0x08: width = height * 16 / 10; break; case 0x0c: width = height * 15 / 9; break; } for (j = 1; j < 5; j++) { if (cvt->code[2] & (1 << j)) { newmode = drm_cvt_mode(dev, width, height, rates[j], j == 0, false, false); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; } } } } return modes; } static void do_cvt_mode(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; if (data->type == EDID_DETAIL_CVT_3BYTE) closure->modes += drm_cvt_modes(closure->connector, timing); } static int add_cvt_modes(struct drm_connector *connector, struct edid *edid) { struct detailed_mode_closure closure = { connector, edid, 0, 0, 0 }; if (version_greater(edid, 1, 2)) drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure); /* XXX should also look for CVT codes in VTB blocks */ return closure.modes; } static void do_detailed_mode(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct drm_display_mode *newmode; if (timing->pixel_clock) { newmode = drm_mode_detailed(closure->connector->dev, closure->edid, timing, closure->quirks); if (!newmode) return; if (closure->preferred) newmode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(closure->connector, newmode); closure->modes++; closure->preferred = 0; } } /* * add_detailed_modes - Add modes from detailed timings * @connector: attached connector * @edid: EDID block to scan * @quirks: quirks to apply */ static int add_detailed_modes(struct drm_connector *connector, struct edid *edid, u32 quirks) { struct detailed_mode_closure closure = { connector, edid, 1, quirks, 0 }; if (closure.preferred && !version_greater(edid, 1, 3)) closure.preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure); return closure.modes; } #define HDMI_IDENTIFIER 0x000C03 #define AUDIO_BLOCK 0x01 #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 #define EDID_BASIC_AUDIO (1 << 6) +#define EDID_CEA_YCRCB444 (1 << 5) +#define EDID_CEA_YCRCB422 (1 << 4) /** * Search EDID for CEA extension block. 
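The extension walk described here is short enough to show standalone: byte 126 of the base block gives the number of 128-byte extension blocks, and a CEA-861 block announces itself with tag 0x02 in its first byte. The buffer in main() is a fabricated two-block EDID used only to exercise the loop.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define EDID_LENGTH 128
#define CEA_EXT     0x02

/* Locate the first CEA-861 extension block, mirroring the walk in
 * drm_find_cea_extension(). */
static const uint8_t *find_cea_ext(const uint8_t *edid)
{
    int i, extensions = edid[126];

    for (i = 0; i < extensions; i++) {
        const uint8_t *ext = edid + EDID_LENGTH * (i + 1);
        if (ext[0] == CEA_EXT)
            return ext;
    }
    return NULL;
}

int main(void)
{
    uint8_t edid[EDID_LENGTH * 2];

    memset(edid, 0, sizeof(edid));
    edid[126] = 1;                  /* one extension block        */
    edid[EDID_LENGTH] = CEA_EXT;    /* ... and it is a CEA block  */

    printf("CEA block %sfound\n", find_cea_ext(edid) ? "" : "not ");
    return 0;
}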
*/ u8 *drm_find_cea_extension(struct edid *edid) { u8 *edid_ext = NULL; int i; /* No EDID or EDID extensions */ if (edid == NULL || edid->extensions == 0) return NULL; /* Find CEA extension */ for (i = 0; i < edid->extensions; i++) { edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); if (edid_ext[0] == CEA_EXT) break; } if (i == edid->extensions) return NULL; return edid_ext; } static void parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db) { connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */ connector->dvi_dual = db[6] & 1; connector->max_tmds_clock = db[7] * 5; connector->latency_present[0] = db[8] >> 7; connector->latency_present[1] = (db[8] >> 6) & 1; connector->video_latency[0] = db[9]; connector->audio_latency[0] = db[10]; connector->video_latency[1] = db[11]; connector->audio_latency[1] = db[12]; DRM_DEBUG_KMS("HDMI: DVI dual %d, " "max TMDS clock %d, " "latency present %d %d, " "video latency %d %d, " "audio latency %d %d\n", connector->dvi_dual, connector->max_tmds_clock, (int) connector->latency_present[0], (int) connector->latency_present[1], connector->video_latency[0], connector->video_latency[1], connector->audio_latency[0], connector->audio_latency[1]); } static void monitor_name(struct detailed_timing *t, void *data) { if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME) *(u8 **)data = t->data.other_data.data.str.str; } /** * drm_edid_to_eld - build ELD from EDID * @connector: connector corresponding to the HDMI/DP sink * @edid: EDID to parse * * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. * Some ELD fields are left to the graphics driver caller: * - Conn_Type * - HDCP * - Port_ID */ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) { uint8_t *eld = connector->eld; u8 *cea; u8 *name; u8 *db; int sad_count = 0; int mnl; int dbl; memset(eld, 0, sizeof(connector->eld)); cea = drm_find_cea_extension(edid); if (!cea) { DRM_DEBUG_KMS("ELD: no CEA Extension found\n"); return; } name = NULL; drm_for_each_detailed_block((u8 *)edid, monitor_name, &name); for (mnl = 0; name && mnl < 13; mnl++) { if (name[mnl] == 0x0a) break; eld[20 + mnl] = name[mnl]; } eld[4] = (cea[1] << 5) | mnl; DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20); eld[0] = 2 << 3; /* ELD version: 2 */ eld[16] = edid->mfg_id[0]; eld[17] = edid->mfg_id[1]; eld[18] = edid->prod_code[0]; eld[19] = edid->prod_code[1]; for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) { dbl = db[0] & 0x1f; switch ((db[0] & 0xe0) >> 5) { case AUDIO_BLOCK: /* Audio Data Block, contains SADs */ sad_count = dbl / 3; memcpy(eld + 20 + mnl, &db[1], dbl); break; case SPEAKER_BLOCK: /* Speaker Allocation Data Block */ eld[7] = db[1]; break; case VENDOR_BLOCK: /* HDMI Vendor-Specific Data Block */ if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0) parse_hdmi_vsdb(connector, db); break; default: break; } } eld[5] |= sad_count << 4; eld[2] = (20 + mnl + sad_count * 3 + 3) / 4; DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count); } /** * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond * @connector: connector associated with the HDMI/DP sink * @mode: the display mode */ int drm_av_sync_delay(struct drm_connector *connector, struct drm_display_mode *mode) { int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); int a, v; if (!connector->latency_present[0]) return 0; if (!connector->latency_present[1]) i = 0; a = connector->audio_latency[i]; v = connector->video_latency[i]; /* * HDMI/DP sink doesn't support audio or video? 
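Picking up the question the comment above asks: a raw latency byte of 255 means the sink cannot present that stream at all, 0 means unknown, and anything else encodes milliseconds / 2 + 1. The sketch below redoes the drm_av_sync_delay() arithmetic for a single progressive mode (the interlace index selection is left out); the 10 ms / 40 ms sample latencies are invented.

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/* Same conversion as drm_av_sync_delay(): returns how long audio must
 * be delayed, in ms, to stay in sync with video. */
static int av_sync_delay_ms(int audio_raw, int video_raw)
{
    int a = audio_raw, v = video_raw;

    if (a == 255 || v == 255)
        return 0;                       /* sink lacks audio or video */
    if (a)
        a = min_int(2 * (a - 1), 500);  /* raw -> ms, capped at 500  */
    if (v)
        v = min_int(2 * (v - 1), 500);
    return max_int(v - a, 0);           /* never a negative delay    */
}

int main(void)
{
    /* Hypothetical sink: 40 ms video latency, 10 ms audio latency. */
    printf("delay = %d ms\n", av_sync_delay_ms(10 / 2 + 1, 40 / 2 + 1));
    return 0;
}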
*/ if (a == 255 || v == 255) return 0; /* * Convert raw EDID values to millisecond. * Treat unknown latency as 0ms. */ if (a) a = min(2 * (a - 1), 500); if (v) v = min(2 * (v - 1), 500); return max(v - a, 0); } /** * drm_select_eld - select one ELD from multiple HDMI/DP sinks * @encoder: the encoder just changed display mode * @mode: the adjusted display mode * * It's possible for one encoder to be associated with multiple HDMI/DP sinks. * The policy is now hard coded to simply use the first HDMI/DP sink's ELD. */ struct drm_connector *drm_select_eld(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_connector *connector; struct drm_device *dev = encoder->dev; list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder && connector->eld[0]) return connector; return NULL; } /** * drm_detect_hdmi_monitor - detect whether monitor is hdmi. * @edid: monitor EDID information * * Parse the CEA extension according to CEA-861-B. * Return true if HDMI, false if not or unknown. */ bool drm_detect_hdmi_monitor(struct edid *edid) { u8 *edid_ext; int i, hdmi_id; int start_offset, end_offset; bool is_hdmi = false; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) goto end; /* Data block offset in CEA extension block */ start_offset = 4; end_offset = edid_ext[2]; /* * Because HDMI identifier is in Vendor Specific Block, * search it from all data blocks of CEA extension. */ for (i = start_offset; i < end_offset; /* Increased by data block len */ i += ((edid_ext[i] & 0x1f) + 1)) { /* Find vendor specific block */ if ((edid_ext[i] >> 5) == VENDOR_BLOCK) { hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) | edid_ext[i + 3] << 16; /* Find HDMI identifier */ if (hdmi_id == HDMI_IDENTIFIER) is_hdmi = true; break; } } end: return is_hdmi; } /** * drm_detect_monitor_audio - check monitor audio capability * * Monitor should have CEA extension block. * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic * audio' only. If there is any audio extension block and supported * audio format, assume at least 'basic audio' support, even if 'basic * audio' is not defined in EDID. * */ bool drm_detect_monitor_audio(struct edid *edid) { u8 *edid_ext; int i, j; bool has_audio = false; int start_offset, end_offset; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) goto end; has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); if (has_audio) { DRM_DEBUG_KMS("Monitor has basic audio support\n"); goto end; } /* Data block offset in CEA extension block */ start_offset = 4; end_offset = edid_ext[2]; for (i = start_offset; i < end_offset; i += ((edid_ext[i] & 0x1f) + 1)) { if ((edid_ext[i] >> 5) == AUDIO_BLOCK) { has_audio = true; for (j = 1; j < (edid_ext[i] & 0x1f); j += 3) DRM_DEBUG_KMS("CEA audio format %d\n", (edid_ext[i + j] >> 3) & 0xf); goto end; } } end: return has_audio; } /** * drm_add_display_info - pull display info out if present * @edid: EDID data * @info: display info (attached to connector) * * Grab any available display info and stuff it into the drm_display_info * structure that's part of the connector. Useful for tracking bpp and * color spaces. 
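Both drm_detect_hdmi_monitor() and drm_detect_monitor_audio() above use the same iteration: data blocks start at offset 4 of the CEA extension, byte 2 gives the offset where they end, and each block's first byte packs a 3-bit tag with a 5-bit payload length. Below is a standalone version of that walk over a hand-built extension block; the audio and vendor payloads are made up except for the HDMI OUI 00-0C-03.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define AUDIO_BLOCK  0x01
#define VENDOR_BLOCK 0x03

/* Walk the CEA data block collection: tag<<5 | length per block. */
static void walk_cea_blocks(const uint8_t *ext)
{
    int i;

    for (i = 4; i < ext[2]; i += (ext[i] & 0x1f) + 1) {
        int tag = ext[i] >> 5;
        int len = ext[i] & 0x1f;

        if (tag == AUDIO_BLOCK)
            printf("audio block, %d SAD(s)\n", len / 3);
        else if (tag == VENDOR_BLOCK)
            printf("vendor block, OUI %02x%02x%02x\n",
                ext[i + 3], ext[i + 2], ext[i + 1]);
    }
}

int main(void)
{
    uint8_t ext[128];
    uint8_t blocks[] = {
        (AUDIO_BLOCK << 5) | 3, 0x09, 0x07, 0x07,   /* one LPCM SAD        */
        (VENDOR_BLOCK << 5) | 5, 0x03, 0x0c, 0x00,  /* HDMI OUI ...        */
        0x10, 0x00,                                 /* ... and CEC address */
    };

    memset(ext, 0, sizeof(ext));
    ext[0] = 0x02;                      /* CEA extension tag    */
    ext[1] = 0x03;                      /* CEA revision 3       */
    memcpy(ext + 4, blocks, sizeof(blocks));
    ext[2] = 4 + sizeof(blocks);        /* data blocks end here */

    walk_cea_blocks(ext);
    return 0;
}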
*/ static void drm_add_display_info(struct edid *edid, struct drm_display_info *info) { u8 *edid_ext; info->width_mm = edid->width_cm * 10; info->height_mm = edid->height_cm * 10; /* driver figures it out in this case */ info->bpc = 0; info->color_formats = 0; - /* Only defined for 1.4 with digital displays */ - if (edid->revision < 4) + if (edid->revision < 3) return; if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return; + /* Get data from CEA blocks if present */ + edid_ext = drm_find_cea_extension(edid); + if (edid_ext) { + info->cea_rev = edid_ext[1]; + + /* The existence of a CEA block should imply RGB support */ + info->color_formats = DRM_COLOR_FORMAT_RGB444; + if (edid_ext[3] & EDID_CEA_YCRCB444) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + if (edid_ext[3] & EDID_CEA_YCRCB422) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; + } + + /* Only defined for 1.4 with digital displays */ + if (edid->revision < 4) + return; + switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { case DRM_EDID_DIGITAL_DEPTH_6: info->bpc = 6; break; case DRM_EDID_DIGITAL_DEPTH_8: info->bpc = 8; break; case DRM_EDID_DIGITAL_DEPTH_10: info->bpc = 10; break; case DRM_EDID_DIGITAL_DEPTH_12: info->bpc = 12; break; case DRM_EDID_DIGITAL_DEPTH_14: info->bpc = 14; break; case DRM_EDID_DIGITAL_DEPTH_16: info->bpc = 16; break; case DRM_EDID_DIGITAL_DEPTH_UNDEF: default: info->bpc = 0; break; } - info->color_formats = DRM_COLOR_FORMAT_RGB444; - if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444) - info->color_formats = DRM_COLOR_FORMAT_YCRCB444; - if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422) - info->color_formats = DRM_COLOR_FORMAT_YCRCB422; - - /* Get data from CEA blocks if present */ - edid_ext = drm_find_cea_extension(edid); - if (!edid_ext) - return; - - info->cea_rev = edid_ext[1]; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; } /** * drm_add_edid_modes - add modes from EDID data, if available * @connector: connector we're probing * @edid: edid data * * Add the specified modes to the connector's mode list. * * Return number of modes added or 0 if we couldn't find any. */ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) { int num_modes = 0; u32 quirks; if (edid == NULL) { return 0; } if (!drm_edid_is_valid(edid)) { device_printf(connector->dev->device, "%s: EDID invalid.\n", drm_get_connector_name(connector)); return 0; } quirks = edid_get_quirks(edid); /* * EDID spec says modes should be preferred in this order: * - preferred detailed mode * - other detailed modes from base block * - detailed modes from extension blocks * - CVT 3-byte code modes * - standard timing codes * - established timing codes * - modes inferred from GTF or CVT range information * * We get this pretty much right. * * XXX order for additional mode types in extension blocks? 
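The depth switch just above maps the three DIGITAL_DEPTH bits of the EDID input byte straight to bits per component. The sketch below expresses the same mapping as a lookup table; the sample byte in main() is invented.

#include <stdio.h>
#include <stdint.h>

/* EDID 1.4 digital input byte: bit 7 set for digital, bits 6:4 encode
 * colour depth. Same mapping as the switch in drm_add_display_info();
 * 0 means "undefined, let the driver decide". */
static int edid_input_to_bpc(uint8_t input)
{
    static const int depth[8] = { 0, 6, 8, 10, 12, 14, 16, 0 };

    if (!(input & 0x80))        /* analog input, no depth field */
        return 0;
    return depth[(input >> 4) & 7];
}

int main(void)
{
    printf("bpc = %d\n", edid_input_to_bpc(0xa5)); /* digital, code 2 -> 8 bpc */
    return 0;
}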
*/ num_modes += add_detailed_modes(connector, edid, quirks); num_modes += add_cvt_modes(connector, edid); num_modes += add_standard_modes(connector, edid); num_modes += add_established_modes(connector, edid); num_modes += add_inferred_modes(connector, edid); if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) edid_fixup_preferred(connector, quirks); drm_add_display_info(edid, &connector->display_info); return num_modes; } /** * drm_add_modes_noedid - add modes for the connectors without EDID * @connector: connector we're probing * @hdisplay: the horizontal display limit * @vdisplay: the vertical display limit * * Add the specified modes to the connector's mode list. Only when the * hdisplay/vdisplay is not beyond the given limit, it will be added. * * Return number of modes added or 0 if we couldn't find any. */ int drm_add_modes_noedid(struct drm_connector *connector, int hdisplay, int vdisplay) { int i, count, num_modes = 0; struct drm_display_mode *mode; struct drm_device *dev = connector->dev; count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); if (hdisplay < 0) hdisplay = 0; if (vdisplay < 0) vdisplay = 0; for (i = 0; i < count; i++) { struct drm_display_mode *ptr = &drm_dmt_modes[i]; if (hdisplay && vdisplay) { /* * Only when two are valid, they will be used to check * whether the mode should be added to the mode list of * the connector. */ if (ptr->hdisplay > hdisplay || ptr->vdisplay > vdisplay) continue; } if (drm_mode_vrefresh(ptr) > 61) continue; mode = drm_mode_duplicate(dev, ptr); if (mode) { drm_mode_probed_add(connector, mode); num_modes++; } } return num_modes; } Index: head/sys/dev/drm2/drm_edid.h =================================================================== --- head/sys/dev/drm2/drm_edid.h (revision 277486) +++ head/sys/dev/drm2/drm_edid.h (revision 277487) @@ -1,244 +1,258 @@ /* * Copyright © 2007-2008 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * $FreeBSD$ */ #ifndef __DRM_EDID_H__ #define __DRM_EDID_H__ #include #include #define EDID_LENGTH 128 #define DDC_ADDR 0x50 #define CEA_EXT 0x02 #define VTB_EXT 0x10 #define DI_EXT 0x40 #define LS_EXT 0x50 #define MI_EXT 0x60 struct est_timings { u8 t1; u8 t2; u8 mfg_rsvd; } __attribute__((packed)); /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */ #define EDID_TIMING_ASPECT_SHIFT 6 #define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT) /* need to add 60 */ #define EDID_TIMING_VFREQ_SHIFT 0 #define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT) struct std_timing { u8 hsize; /* need to multiply by 8 then add 248 */ u8 vfreq_aspect; } __attribute__((packed)); #define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) #define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) #define DRM_EDID_PT_SEPARATE_SYNC (3 << 3) #define DRM_EDID_PT_STEREO (1 << 5) #define DRM_EDID_PT_INTERLACED (1 << 7) /* If detailed data is pixel timing */ struct detailed_pixel_timing { u8 hactive_lo; u8 hblank_lo; u8 hactive_hblank_hi; u8 vactive_lo; u8 vblank_lo; u8 vactive_vblank_hi; u8 hsync_offset_lo; u8 hsync_pulse_width_lo; u8 vsync_offset_pulse_width_lo; u8 hsync_vsync_offset_pulse_width_hi; u8 width_mm_lo; u8 height_mm_lo; u8 width_height_mm_hi; u8 hborder; u8 vborder; u8 misc; } __attribute__((packed)); /* If it's not pixel timing, it'll be one of the below */ struct detailed_data_string { u8 str[13]; } __attribute__((packed)); struct detailed_data_monitor_range { u8 min_vfreq; u8 max_vfreq; u8 min_hfreq_khz; u8 max_hfreq_khz; u8 pixel_clock_mhz; /* need to multiply by 10 */ - u16 sec_gtf_toggle; /* A000=use above, 20=use below */ - u8 hfreq_start_khz; /* need to multiply by 2 */ - u8 c; /* need to divide by 2 */ - u16 m; - u8 k; - u8 j; /* need to divide by 2 */ + u8 flags; + union { + struct { + u8 reserved; + u8 hfreq_start_khz; /* need to multiply by 2 */ + u8 c; /* need to divide by 2 */ + u16 m; + u8 k; + u8 j; /* need to divide by 2 */ + } __attribute__((packed)) gtf2; + struct { + u8 version; + u8 data1; /* high 6 bits: extra clock resolution */ + u8 data2; /* plus low 2 of above: max hactive */ + u8 supported_aspects; + u8 flags; /* preferred aspect and blanking support */ + u8 supported_scalings; + u8 preferred_refresh; + } __attribute__((packed)) cvt; + } formula; } __attribute__((packed)); struct detailed_data_wpindex { u8 white_yx_lo; /* Lower 2 bits each */ u8 white_x_hi; u8 white_y_hi; u8 gamma; /* need to divide by 100 then add 1 */ } __attribute__((packed)); struct detailed_data_color_point { u8 windex1; u8 wpindex1[3]; u8 windex2; u8 wpindex2[3]; } __attribute__((packed)); struct cvt_timing { u8 code[3]; } __attribute__((packed)); struct detailed_non_pixel { u8 pad1; u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name fb=color point data, fa=standard timing data, f9=undefined, f8=mfg. 
reserved */ u8 pad2; union { struct detailed_data_string str; struct detailed_data_monitor_range range; struct detailed_data_wpindex color; struct std_timing timings[6]; struct cvt_timing cvt[4]; } data; } __attribute__((packed)); #define EDID_DETAIL_EST_TIMINGS 0xf7 #define EDID_DETAIL_CVT_3BYTE 0xf8 #define EDID_DETAIL_COLOR_MGMT_DATA 0xf9 #define EDID_DETAIL_STD_MODES 0xfa #define EDID_DETAIL_MONITOR_CPDATA 0xfb #define EDID_DETAIL_MONITOR_NAME 0xfc #define EDID_DETAIL_MONITOR_RANGE 0xfd #define EDID_DETAIL_MONITOR_STRING 0xfe #define EDID_DETAIL_MONITOR_SERIAL 0xff struct detailed_timing { u16 pixel_clock; /* need to multiply by 10 KHz */ union { struct detailed_pixel_timing pixel_data; struct detailed_non_pixel other_data; } data; } __attribute__((packed)); #define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0) #define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1) #define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2) #define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3) #define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) #define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) #define DRM_EDID_INPUT_DIGITAL (1 << 7) #define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) #define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) #define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) #define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) #define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) #define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) #define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) #define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) #define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) #define DRM_EDID_DIGITAL_TYPE_UNDEF (0) #define DRM_EDID_DIGITAL_TYPE_DVI (1) #define DRM_EDID_DIGITAL_TYPE_HDMI_A (2) #define DRM_EDID_DIGITAL_TYPE_HDMI_B (3) #define DRM_EDID_DIGITAL_TYPE_MDDI (4) #define DRM_EDID_DIGITAL_TYPE_DP (5) #define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) #define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) #define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2) #define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */ /* If digital */ #define DRM_EDID_FEATURE_COLOR_MASK (3 << 3) #define DRM_EDID_FEATURE_RGB (0 << 3) #define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3) #define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3) #define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */ #define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5) #define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) #define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) struct edid { u8 header[8]; /* Vendor & product info */ u8 mfg_id[2]; u8 prod_code[2]; u32 serial; /* FIXME: byte order */ u8 mfg_week; u8 mfg_year; /* EDID version */ u8 version; u8 revision; /* Display info: */ u8 input; u8 width_cm; u8 height_cm; u8 gamma; u8 features; /* Color characteristics */ u8 red_green_lo; u8 black_white_lo; u8 red_x; u8 red_y; u8 green_x; u8 green_y; u8 blue_x; u8 blue_y; u8 white_x; u8 white_y; /* Est. timings and mfg rsvd timings*/ struct est_timings established_timings; /* Standard timings 1-8*/ struct std_timing standard_timings[8]; /* Detailing timings 1-4 */ struct detailed_timing detailed_timings[4]; /* Number of 128 byte ext. 
blocks */ u8 extensions; /* Checksum */ u8 checksum; } __attribute__((packed)); #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) struct drm_encoder; struct drm_connector; struct drm_display_mode; void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_av_sync_delay(struct drm_connector *connector, struct drm_display_mode *mode); struct drm_connector *drm_select_eld(struct drm_encoder *encoder, struct drm_display_mode *mode); #endif /* __DRM_EDID_H__ */ Index: head/sys/dev/drm2/drm_edid_modes.h =================================================================== --- head/sys/dev/drm2/drm_edid_modes.h (revision 277486) +++ head/sys/dev/drm2/drm_edid_modes.h (revision 277487) @@ -1,381 +1,775 @@ /* * Copyright (c) 2007-2008 Intel Corporation * Jesse Barnes * Copyright 2010 Red Hat, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * $FreeBSD$ */ #include #include /* * Autogenerated from the DMT spec. * This table is copied from xfree86/modes/xf86EdidModes.c. - * But the mode with Reduced blank feature is deleted. 
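Since the header above defines EDID_PRODUCT_ID(), and edid_get_quirks() earlier matches on it together with the vendor string, here is a small sketch assembling both identifiers from raw base-block bytes. The 5-bit letter packing of the manufacturer ID follows the EDID specification rather than code shown in this patch, and the sample bytes are purely illustrative.

#include <stdio.h>
#include <stdint.h>

/* EDID_PRODUCT_ID() is just the little-endian product code; the vendor
 * ID packs three letters ('A' = 1) into 5-bit fields of a big-endian
 * 16-bit word. */
int main(void)
{
    uint8_t mfg_id[2]    = { 0x10, 0xac };  /* sample bytes */
    uint8_t prod_code[2] = { 0x54, 0xa0 };  /* sample bytes */
    unsigned int product = prod_code[0] | (prod_code[1] << 8);
    char vendor[4];

    vendor[0] = '@' + ((mfg_id[0] >> 2) & 0x1f);
    vendor[1] = '@' + (((mfg_id[0] & 0x03) << 3) | (mfg_id[1] >> 5));
    vendor[2] = '@' + (mfg_id[1] & 0x1f);
    vendor[3] = '\0';

    printf("vendor %s, product 0x%04x\n", vendor, product);
    return 0;
}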
*/ static struct drm_display_mode drm_dmt_modes[] = { /* 640x350@85Hz */ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, 736, 832, 0, 350, 382, 385, 445, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x400@85Hz */ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, 736, 832, 0, 400, 401, 404, 445, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@85Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756, 828, 936, 0, 400, 401, 404, 446, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 640x480@60Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 489, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 492, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, 720, 840, 0, 480, 481, 484, 500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@85Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696, 752, 832, 0, 480, 481, 484, 509, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 800x600@56Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, 896, 1024, 0, 600, 601, 603, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, 976, 1040, 0, 600, 637, 643, 666, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, 896, 1056, 0, 600, 601, 604, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@85Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832, 896, 1048, 0, 600, 601, 604, 631, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 800x600@120Hz RB */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848, + 880, 960, 0, 600, 603, 607, 636, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 848x480@60Hz */ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864, 976, 1088, 0, 480, 486, 494, 517, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@43Hz, interlace */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, 1208, 1264, 0, 768, 768, 772, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@60Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, 1184, 1328, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040, 1136, 1312, 0, 768, 769, 772, 800, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@85Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, 1168, 1376, 0, 768, 769, 772, 808, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1024x768@120Hz RB */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072, + 1104, 1184, 0, 768, 771, 775, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1152x864@75Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 
900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x768@60Hz RB */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328, + 1360, 1440, 0, 768, 771, 778, 790, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x768@60Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 1472, 1664, 0, 768, 771, 778, 798, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@75Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360, 1488, 1696, 0, 768, 771, 778, 805, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x768@85Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360, 1496, 1712, 0, 768, 771, 778, 809, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x768@120Hz RB */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328, + 1360, 1440, 0, 768, 771, 778, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1280x800@60Hz RB */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328, + 1360, 1440, 0, 800, 803, 809, 823, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@60Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, 1480, 1680, 0, 800, 803, 809, 831, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@75Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360, 1488, 1696, 0, 800, 803, 809, 838, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x800@85Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360, 1496, 1712, 0, 800, 803, 809, 843, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x800@120Hz RB */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328, + 1360, 1440, 0, 800, 803, 809, 847, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x960@60Hz */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, 1488, 1800, 0, 960, 961, 964, 1000, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x960@85Hz */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344, 1504, 1728, 0, 960, 961, 964, 1011, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x960@120Hz RB */ + { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328, + 1360, 1440, 0, 960, 963, 967, 1017, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x1024@60Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@85Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344, 1504, 1728, 0, 1024, 1025, 1028, 1072, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x1024@120Hz RB */ + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328, + 1360, 1440, 0, 1024, 1027, 1034, 1084, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1360x768@60Hz */ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, 1536, 1792, 0, 768, 771, 777, 795, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1440x1050@60Hz */ + /* 1360x768@120Hz RB */ + { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408, + 1440, 1520, 0, 768, 771, 776, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1400x1050@60Hz RB */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448, + 1480, 1560, 0, 1050, 1053, 
1057, 1080, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1400x1050@60Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1440x1050@75Hz */ + /* 1400x1050@75Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504, 1648, 1896, 0, 1050, 1053, 1057, 1099, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1440x1050@85Hz */ + /* 1400x1050@85Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, 1656, 1912, 0, 1050, 1053, 1057, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1400x1050@120Hz RB */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448, + 1480, 1560, 0, 1050, 1053, 1057, 1112, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1440x900@60Hz RB */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488, + 1520, 1600, 0, 900, 903, 909, 926, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1440x900@60Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, 1672, 1904, 0, 900, 903, 909, 934, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1440x900@75Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536, 1688, 1936, 0, 900, 903, 909, 942, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1440x900@85Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544, 1696, 1952, 0, 900, 903, 909, 948, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1440x900@120Hz RB */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488, + 1520, 1600, 0, 900, 903, 909, 953, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1600x1200@60Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@65Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@70Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@75Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@85Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1600x1200@120Hz RB */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648, + 1680, 1760, 0, 1200, 1203, 1207, 1271, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1680x1050@60Hz RB */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728, + 1760, 1840, 0, 1050, 1053, 1059, 1080, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1680x1050@60Hz */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1680x1050@75Hz */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800, 1976, 2272, 0, 1050, 1053, 1059, 1099, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1680x1050@85Hz */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808, 1984, 2288, 0, 1050, 1053, 1059, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1680x1050@120Hz RB */ + { 
DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728, + 1760, 1840, 0, 1050, 1053, 1059, 1112, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1792x1344@60Hz */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1729x1344@75Hz */ + /* 1792x1344@75Hz */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, 2104, 2456, 0, 1344, 1345, 1348, 1417, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1853x1392@60Hz */ + /* 1792x1344@120Hz RB */ + { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840, + 1872, 1952, 0, 1344, 1347, 1351, 1423, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1856x1392@60Hz */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1856x1392@75Hz */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984, 2208, 2560, 0, 1392, 1395, 1399, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1856x1392@120Hz RB */ + { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904, + 1936, 2016, 0, 1392, 1395, 1399, 1474, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1920x1200@60Hz RB */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968, + 2000, 2080, 0, 1200, 1203, 1209, 1235, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1200@60Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@75Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056, 2264, 2608, 0, 1200, 1203, 1209, 1255, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@85Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064, 2272, 2624, 0, 1200, 1203, 1209, 1262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1920x1200@120Hz RB */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968, + 2000, 2080, 0, 1200, 1203, 1209, 1271, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1440@60Hz */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1440@75Hz */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064, 2288, 2640, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1920x1440@120Hz RB */ + { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968, + 2000, 2080, 0, 1440, 1443, 1447, 1525, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 2560x1600@60Hz RB */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608, + 2640, 2720, 0, 1600, 1603, 1609, 1646, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 2560x1600@60Hz */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 2560x1600@75HZ */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768, 3048, 3536, 0, 1600, 1603, 1609, 1672, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 2560x1600@85HZ */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768, 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 2560x1600@120Hz RB */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 
2608, + 2640, 2720, 0, 1600, 1603, 1609, 1694, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + }; static const int drm_num_dmt_modes = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); static struct drm_display_mode edid_est_modes[] = { { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, 896, 1024, 0, 600, 601, 603, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, 720, 840, 0, 480, 481, 484, 500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, 768, 864, 0, 480, 483, 486, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, 846, 900, 0, 400, 421, 423, 449, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738, 846, 900, 0, 400, 412, 414, 449, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040, 1136, 1312, 0, 768, 769, 772, 800, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, 1184, 1328, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, 1208, 1264, 0, 768, 768, 776, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, 928, 1152, 0, 624, 625, 628, 667, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, 896, 1056, 0, 600, 601, 604, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, 976, 1040, 0, 600, 637, 643, 666, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ }; -static const struct { +struct minimode { short w; short h; short r; short rb; -} est3_modes[] = { +}; + +static const struct minimode est3_modes[] = { /* byte 6 */ { 640, 350, 85, 0 }, { 640, 400, 85, 0 }, { 720, 400, 85, 0 }, { 640, 480, 85, 0 }, { 848, 480, 60, 0 }, { 800, 600, 85, 0 }, { 1024, 768, 85, 0 }, { 1152, 864, 75, 0 }, /* byte 7 */ { 1280, 768, 60, 1 }, { 1280, 768, 60, 0 }, { 1280, 768, 75, 0 }, { 1280, 768, 85, 0 }, { 
1280, 960, 60, 0 }, { 1280, 960, 85, 0 }, { 1280, 1024, 60, 0 }, { 1280, 1024, 85, 0 }, /* byte 8 */ { 1360, 768, 60, 0 }, { 1440, 900, 60, 1 }, { 1440, 900, 60, 0 }, { 1440, 900, 75, 0 }, { 1440, 900, 85, 0 }, { 1400, 1050, 60, 1 }, { 1400, 1050, 60, 0 }, { 1400, 1050, 75, 0 }, /* byte 9 */ { 1400, 1050, 85, 0 }, { 1680, 1050, 60, 1 }, { 1680, 1050, 60, 0 }, { 1680, 1050, 75, 0 }, { 1680, 1050, 85, 0 }, { 1600, 1200, 60, 0 }, { 1600, 1200, 65, 0 }, { 1600, 1200, 70, 0 }, /* byte 10 */ { 1600, 1200, 75, 0 }, { 1600, 1200, 85, 0 }, { 1792, 1344, 60, 0 }, { 1792, 1344, 85, 0 }, { 1856, 1392, 60, 0 }, { 1856, 1392, 75, 0 }, { 1920, 1200, 60, 1 }, { 1920, 1200, 60, 0 }, /* byte 11 */ { 1920, 1200, 75, 0 }, { 1920, 1200, 85, 0 }, { 1920, 1440, 60, 0 }, { 1920, 1440, 75, 0 }, }; -static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]); +static const int num_est3_modes = DRM_ARRAY_SIZE(est3_modes); + +static const struct minimode extra_modes[] = { + { 1024, 576, 60, 0 }, + { 1366, 768, 60, 0 }, + { 1600, 900, 60, 0 }, + { 1680, 945, 60, 0 }, + { 1920, 1080, 60, 0 }, + { 2048, 1152, 60, 0 }, + { 2048, 1536, 60, 0 }, +}; +static const int num_extra_modes = DRM_ARRAY_SIZE(extra_modes); + +/* + * Probably taken from CEA-861 spec. + * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. + */ +static const struct drm_display_mode edid_cea_modes[] = { + /* 1 - 640x480@60Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, + 752, 800, 0, 480, 490, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 2 - 720x480@60Hz */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 3 - 720x480@60Hz */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 4 - 1280x720@60Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 5 - 1920x1080i@60Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 6 - 1440x480i@60Hz */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, + 1602, 1716, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 7 - 1440x480i@60Hz */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, + 1602, 1716, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 8 - 1440x240@60Hz */ + { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, + 1602, 1716, 0, 240, 244, 247, 262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, + /* 9 - 1440x240@60Hz */ + { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, + 1602, 1716, 0, 240, 244, 247, 262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, + /* 10 - 2880x480i@60Hz */ + { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + 3204, 3432, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 11 - 2880x480i@60Hz */ + { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + 3204, 3432, 0, 480, 488, 494, 525, 0, + 
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 12 - 2880x240@60Hz */ + { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + 3204, 3432, 0, 240, 244, 247, 262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 13 - 2880x240@60Hz */ + { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + 3204, 3432, 0, 240, 244, 247, 262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 14 - 1440x480@60Hz */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, + 1596, 1716, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 15 - 1440x480@60Hz */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, + 1596, 1716, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 16 - 1920x1080@60Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 17 - 720x576@50Hz */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 18 - 720x576@50Hz */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 19 - 1280x720@50Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 20 - 1920x1080i@50Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 21 - 1440x576i@50Hz */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, + 1590, 1728, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 22 - 1440x576i@50Hz */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, + 1590, 1728, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 23 - 1440x288@50Hz */ + { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, + 1590, 1728, 0, 288, 290, 293, 312, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, + /* 24 - 1440x288@50Hz */ + { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, + 1590, 1728, 0, 288, 290, 293, 312, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, + /* 25 - 2880x576i@50Hz */ + { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + 3180, 3456, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 26 - 2880x576i@50Hz */ + { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + 3180, 3456, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 27 - 2880x288@50Hz */ + { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + 3180, 3456, 0, 288, 290, 293, 312, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 28 - 2880x288@50Hz */ + { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + 3180, 3456, 0, 288, 290, 293, 312, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 29 - 1440x576@50Hz */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, + 1592, 1728, 0, 576, 581, 586, 
625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 30 - 1440x576@50Hz */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, + 1592, 1728, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 31 - 1920x1080@50Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 32 - 1920x1080@24Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 33 - 1920x1080@25Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 34 - 1920x1080@30Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 35 - 2880x480@60Hz */ + { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, + 3192, 3432, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 36 - 2880x480@60Hz */ + { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, + 3192, 3432, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 37 - 2880x576@50Hz */ + { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, + 3184, 3456, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 38 - 2880x576@50Hz */ + { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, + 3184, 3456, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 39 - 1920x1080i@50Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, + 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 40 - 1920x1080i@100Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 41 - 1280x720@100Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 42 - 720x576@100Hz */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 43 - 720x576@100Hz */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 44 - 1440x576i@100Hz */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, + 1590, 1728, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, + /* 45 - 1440x576i@100Hz */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, + 1590, 1728, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, + /* 46 - 1920x1080i@120Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 47 - 1280x720@120Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 48 - 
720x480@120Hz */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 49 - 720x480@120Hz */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 50 - 1440x480i@120Hz */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, + 1602, 1716, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 51 - 1440x480i@120Hz */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, + 1602, 1716, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 52 - 720x576@200Hz */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 53 - 720x576@200Hz */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 54 - 1440x576i@200Hz */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, + 1590, 1728, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 55 - 1440x576i@200Hz */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, + 1590, 1728, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 56 - 720x480@240Hz */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 57 - 720x480@240Hz */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 58 - 1440x480i@240 */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, + 1602, 1716, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 59 - 1440x480i@240 */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, + 1602, 1716, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, + /* 60 - 1280x720@24Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 61 - 1280x720@25Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, + 3740, 3960, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 62 - 1280x720@30Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 63 - 1920x1080@120Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 64 - 1920x1080@100Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, +}; +static const int drm_num_cea_modes = DRM_ARRAY_SIZE(edid_cea_modes); Index: head/sys/dev/drm2/drm_fb_helper.c 
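/*
 * Reading aid for the mode tables above; a sketch only, not part of this
 * change.  The DRM_MODE() initializer macro defined earlier in drm_edid.c
 * is assumed to fill struct drm_display_mode in the conventional order
 * (name, type, pixel clock in kHz, horizontal timings, vertical timings,
 * flags), so entry 1 of edid_cea_modes would expand to roughly:
 */
#if 0	/* illustrative expansion, field names assumed */
static const struct drm_display_mode cea_mode_1_expanded = {
	.name = "640x480", .type = DRM_MODE_TYPE_DRIVER,
	.clock = 25175,			/* pixel clock, kHz */
	.hdisplay = 640, .hsync_start = 656, .hsync_end = 752, .htotal = 800,
	.hskew = 0,
	.vdisplay = 480, .vsync_start = 490, .vsync_end = 492, .vtotal = 525,
	.vscan = 0,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
#endif
/*
 * The new element counts (num_est3_modes, num_extra_modes,
 * drm_num_cea_modes) use DRM_ARRAY_SIZE(), assumed to be the usual
 * element-count macro, roughly:
 *	#define DRM_ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))
 */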
=================================================================== --- head/sys/dev/drm2/drm_fb_helper.c (revision 277486) +++ head/sys/dev/drm2/drm_fb_helper.c (revision 277487) @@ -1,1516 +1,1519 @@ /* * Copyright (c) 2006-2009 Red Hat Inc. * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie * * DRM framebuffer helper functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. * * Authors: * Dave Airlie * Jesse Barnes */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include struct vt_kms_softc { struct drm_fb_helper *fb_helper; struct task fb_mode_task; }; static fb_enter_t vt_kms_postswitch; static void vt_restore_fbdev_mode(void *, int); /* Call restore out of vt(9) locks. */ static void vt_restore_fbdev_mode(void *arg, int pending) { struct drm_fb_helper *fb_helper; struct vt_kms_softc *sc; sc = (struct vt_kms_softc *)arg; fb_helper = sc->fb_helper; sx_xlock(&fb_helper->dev->mode_config.mutex); drm_fb_helper_restore_fbdev_mode(fb_helper); sx_xunlock(&fb_helper->dev->mode_config.mutex); } static int vt_kms_postswitch(void *arg) { struct vt_kms_softc *sc; sc = (struct vt_kms_softc *)arg; if (!kdb_active && panicstr == NULL) taskqueue_enqueue_fast(taskqueue_thread, &sc->fb_mode_task); else drm_fb_helper_restore_fbdev_mode(sc->fb_helper); return (0); } static DRM_LIST_HEAD(kernel_fb_helper_list); /* simple single crtc case helper function */ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct drm_fb_helper_connector *fb_helper_connector; fb_helper_connector = malloc( sizeof(struct drm_fb_helper_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); fb_helper_connector->connector = connector; fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; } return 0; } static int fb_get_options(const char *connector_name, char **option) { char tunable[64]; /* * A user may use loader tunables to set a specific mode for the * console. Tunables are read in the following order: * 1. kern.vt.fb.modes.$connector_name * 2. 
kern.vt.fb.default_mode * * Example of a mode specific to the LVDS connector: * kern.vt.fb.modes.LVDS="1024x768" * * Example of a mode applied to all connectors not having a * connector-specific mode: * kern.vt.fb.default_mode="640x480" */ snprintf(tunable, sizeof(tunable), "kern.vt.fb.modes.%s", connector_name); DRM_INFO("Connector %s: get mode from tunables:\n", connector_name); DRM_INFO(" - %s\n", tunable); DRM_INFO(" - kern.vt.fb.default_mode\n"); *option = kern_getenv(tunable); if (*option == NULL) *option = kern_getenv("kern.vt.fb.default_mode"); return (*option != NULL ? 0 : 1); } static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) { struct drm_fb_helper_connector *fb_helper_conn; int i; for (i = 0; i < fb_helper->connector_count; i++) { struct drm_cmdline_mode *mode; struct drm_connector *connector; char *option = NULL; fb_helper_conn = fb_helper->connector_info[i]; connector = fb_helper_conn->connector; mode = &fb_helper_conn->cmdline_mode; /* do something on return - turn off connector maybe */ if (fb_get_options(drm_get_connector_name(connector), &option)) continue; if (drm_mode_parse_command_line_for_connector(option, connector, mode)) { if (mode->force) { const char *s; switch (mode->force) { case DRM_FORCE_OFF: s = "OFF"; break; case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; default: case DRM_FORCE_ON: s = "ON"; break; } DRM_INFO("forcing %s connector %s\n", drm_get_connector_name(connector), s); connector->force = mode->force; } DRM_INFO("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n", drm_get_connector_name(connector), mode->xres, mode->yres, mode->refresh_specified ? mode->refresh : 60, mode->rb ? " reduced blanking" : "", mode->margins ? " with margins" : "", mode->interlace ? " interlaced" : ""); } freeenv(option); } return 0; } #if 0 static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper) { uint16_t *r_base, *g_base, *b_base; int i; r_base = crtc->gamma_store; g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; for (i = 0; i < crtc->gamma_size; i++) helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i); } static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) { uint16_t *r_base, *g_base, *b_base; + if (crtc->funcs->gamma_set == NULL) + return; + r_base = crtc->gamma_store; g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); } #endif #if 0 int drm_fb_helper_debug_enter(struct fb_info *info) { struct drm_fb_helper *helper = info->par; struct drm_crtc_helper_funcs *funcs; int i; if (list_empty(&kernel_fb_helper_list)) return false; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { for (i = 0; i < helper->crtc_count; i++) { struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; if (!mode_set->crtc->enabled) continue; funcs = mode_set->crtc->helper_private; drm_fb_helper_save_lut_atomic(mode_set->crtc, helper); funcs->mode_set_base_atomic(mode_set->crtc, mode_set->fb, mode_set->x, mode_set->y, ENTER_ATOMIC_MODE_SET); } } return 0; } #endif #if 0 /* Find the real fb for a given fb helper CRTC */ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_crtc *c; list_for_each_entry(c, &dev->mode_config.crtc_list, head) { if (crtc->base.id == c->base.id) return c->fb; } return NULL; } #endif #if 0 int drm_fb_helper_debug_leave(struct fb_info *info) { struct drm_fb_helper 
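/*
 * Usage sketch for the tunable lookup in fb_get_options() and the parser
 * call in drm_fb_helper_parse_command_line() above.  The exact mode-string
 * grammar lives in drm_mode_parse_command_line_for_connector() and is not
 * part of this change; the "@refresh" suffix below is an assumption about
 * that grammar.
 *
 *	kern.vt.fb.modes.LVDS="1024x768"	-> xres=1024, yres=768
 *	kern.vt.fb.default_mode="800x600@60"	-> xres=800, yres=600,
 *						   refresh_specified, refresh=60
 *
 * A connector-specific tunable wins over kern.vt.fb.default_mode, matching
 * the lookup order documented in fb_get_options().
 */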
*helper = info->par; struct drm_crtc *crtc; struct drm_crtc_helper_funcs *funcs; struct drm_framebuffer *fb; int i; for (i = 0; i < helper->crtc_count; i++) { struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; crtc = mode_set->crtc; funcs = crtc->helper_private; fb = drm_mode_config_fb(crtc); if (!crtc->enabled) continue; if (!fb) { DRM_ERROR("no fb to restore??\n"); continue; } drm_fb_helper_restore_lut_atomic(mode_set->crtc); funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, crtc->y, LEAVE_ATOMIC_MODE_SET); } return 0; } #endif bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) { bool error = false; int i, ret; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; ret = drm_crtc_helper_set_config(mode_set); if (ret) error = true; } return error; } #if 0 bool drm_fb_helper_force_kernel_mode(void) { bool ret, error = false; struct drm_fb_helper *helper; if (list_empty(&kernel_fb_helper_list)) return false; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF) continue; ret = drm_fb_helper_restore_fbdev_mode(helper); if (ret) error = true; } return error; } #endif #if 0 int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, void *panic_str) { printf("panic occurred, switching back to text console\n"); return drm_fb_helper_force_kernel_mode(); return 0; } static struct notifier_block paniced = { .notifier_call = drm_fb_helper_panic, }; /** * drm_fb_helper_restore - restore the framebuffer console (kernel) config * * Restore's the kernel's fbcon mode, used for lastclose & panic paths. */ void drm_fb_helper_restore(void) { bool ret; ret = drm_fb_helper_force_kernel_mode(); if (ret == true) DRM_ERROR("Failed to restore crtc configuration\n"); } #ifdef CONFIG_MAGIC_SYSRQ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) { drm_fb_helper_restore(); } static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); static void drm_fb_helper_sysrq(int dummy1) { schedule_work(&drm_fb_helper_restore_work); } static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { .handler = drm_fb_helper_sysrq, .help_msg = "force-fb(V)", .action_msg = "Restore framebuffer console", }; #else static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; #endif #endif #if 0 static void drm_fb_helper_on(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_crtc *crtc; struct drm_crtc_helper_funcs *crtc_funcs; struct drm_connector *connector; struct drm_encoder *encoder; int i, j; /* * For each CRTC in this fb, turn the crtc on then, * find all associated encoders and turn them on. 
*/ sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; crtc_funcs = crtc->helper_private; if (!crtc->enabled) continue; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); /* Walk the connectors & encoders on this fb turning them on */ for (j = 0; j < fb_helper->connector_count; j++) { connector = fb_helper->connector_info[j]->connector; connector->dpms = DRM_MODE_DPMS_ON; drm_connector_property_set_value(connector, dev->mode_config.dpms_property, DRM_MODE_DPMS_ON); } /* Found a CRTC on this fb, now find encoders */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { struct drm_encoder_helper_funcs *encoder_funcs; encoder_funcs = encoder->helper_private; encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); } } } sx_xunlock(&dev->mode_config.mutex); } #endif #if 0 static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_crtc *crtc; struct drm_crtc_helper_funcs *crtc_funcs; struct drm_connector *connector; struct drm_encoder *encoder; int i, j; /* * For each CRTC in this fb, find all associated encoders * and turn them off, then turn off the CRTC. */ sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; crtc_funcs = crtc->helper_private; if (!crtc->enabled) continue; /* Walk the connectors on this fb and mark them off */ for (j = 0; j < fb_helper->connector_count; j++) { connector = fb_helper->connector_info[j]->connector; connector->dpms = dpms_mode; drm_connector_property_set_value(connector, dev->mode_config.dpms_property, dpms_mode); } /* Found a CRTC on this fb, now find encoders */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { struct drm_encoder_helper_funcs *encoder_funcs; encoder_funcs = encoder->helper_private; encoder_funcs->dpms(encoder, dpms_mode); } } crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); } sx_xunlock(&dev->mode_config.mutex); } #endif #if 0 int drm_fb_helper_blank(int blank, struct fb_info *info) { switch (blank) { /* Display: On; HSync: On, VSync: On */ case FB_BLANK_UNBLANK: drm_fb_helper_on(info); break; /* Display: Off; HSync: On, VSync: On */ case FB_BLANK_NORMAL: drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); break; /* Display: Off; HSync: Off, VSync: On */ case FB_BLANK_HSYNC_SUSPEND: drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); break; /* Display: Off; HSync: On, VSync: Off */ case FB_BLANK_VSYNC_SUSPEND: drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); break; /* Display: Off; HSync: Off, VSync: Off */ case FB_BLANK_POWERDOWN: drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); break; } return 0; } #endif static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) { int i; for (i = 0; i < helper->connector_count; i++) free(helper->connector_info[i], DRM_MEM_KMS); free(helper->connector_info, DRM_MEM_KMS); for (i = 0; i < helper->crtc_count; i++) { free(helper->crtc_info[i].mode_set.connectors, DRM_MEM_KMS); if (helper->crtc_info[i].mode_set.mode) drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode); } free(helper->crtc_info, DRM_MEM_KMS); } int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *fb_helper, int crtc_count, int max_conn_count) { struct drm_crtc *crtc; int i; fb_helper->dev = dev; INIT_LIST_HEAD(&fb_helper->kernel_fb_list); fb_helper->crtc_info = malloc(crtc_count * sizeof(struct 
drm_fb_helper_crtc), DRM_MEM_KMS, M_WAITOK | M_ZERO); fb_helper->crtc_count = crtc_count; fb_helper->connector_info = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_connector *), DRM_MEM_KMS, M_WAITOK | M_ZERO); fb_helper->connector_count = 0; for (i = 0; i < crtc_count; i++) { fb_helper->crtc_info[i].mode_set.connectors = malloc(max_conn_count * sizeof(struct drm_connector *), DRM_MEM_KMS, M_WAITOK | M_ZERO); fb_helper->crtc_info[i].mode_set.num_connectors = 0; } i = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { fb_helper->crtc_info[i].crtc_id = crtc->base.id; fb_helper->crtc_info[i].mode_set.crtc = crtc; i++; } fb_helper->conn_limit = max_conn_count; return 0; } void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) { if (!list_empty(&fb_helper->kernel_fb_list)) { list_del(&fb_helper->kernel_fb_list); if (list_empty(&kernel_fb_helper_list)) { #if 0 printk(KERN_INFO "drm: unregistered panic notifier\n"); atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); #endif } } drm_fb_helper_crtc_free(fb_helper); } #if 0 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; int pindex; if (info->fix.visual == FB_VISUAL_trueCOLOR) { u32 *palette; u32 value; /* place color in psuedopalette */ if (regno > 16) return -EINVAL; palette = (u32 *)info->pseudo_palette; red >>= (16 - info->var.red.length); green >>= (16 - info->var.green.length); blue >>= (16 - info->var.blue.length); value = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset); if (info->var.transp.length > 0) { u32 mask = (1 << info->var.transp.length) - 1; mask <<= info->var.transp.offset; value |= mask; } palette[regno] = value; return 0; } pindex = regno; if (fb->bits_per_pixel == 16) { pindex = regno << 3; if (fb->depth == 16 && regno > 63) return -EINVAL; if (fb->depth == 15 && regno > 31) return -EINVAL; if (fb->depth == 16) { u16 r, g, b; int i; if (regno < 32) { for (i = 0; i < 8; i++) fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex + i); } fb_helper->funcs->gamma_get(crtc, &r, &g, &b, pindex >> 1); for (i = 0; i < 4; i++) fb_helper->funcs->gamma_set(crtc, r, green, b, (pindex >> 1) + i); } } if (fb->depth != 16) fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); return 0; } #endif #if 0 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_crtc_helper_funcs *crtc_funcs; u16 *red, *green, *blue, *transp; struct drm_crtc *crtc; int i, j, rc = 0; int start; for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; crtc_funcs = crtc->helper_private; red = cmap->red; green = cmap->green; blue = cmap->blue; transp = cmap->transp; start = cmap->start; for (j = 0; j < cmap->len; j++) { u16 hred, hgreen, hblue, htransp = 0xffff; hred = *red++; hgreen = *green++; hblue = *blue++; if (transp) htransp = *transp++; rc = setcolreg(crtc, hred, hgreen, hblue, start++, info); if (rc) return rc; } crtc_funcs->load_lut(crtc); } return rc; } #endif #if 0 int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; int depth; if (var->pixclock != 0 || in_dbg_master()) return -EINVAL; /* Need to resize the fb object 
!!! */ if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height || var->xres_virtual > fb->width || var->yres_virtual > fb->height) { DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb " "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, var->xres_virtual, var->yres_virtual, fb->width, fb->height, fb->bits_per_pixel); return -EINVAL; } switch (var->bits_per_pixel) { case 16: depth = (var->green.length == 6) ? 16 : 15; break; case 32: depth = (var->transp.length > 0) ? 32 : 24; break; default: depth = var->bits_per_pixel; break; } switch (depth) { case 8: var->red.offset = 0; var->green.offset = 0; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 0; var->transp.offset = 0; break; case 15: var->red.offset = 10; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 5; var->blue.length = 5; var->transp.length = 1; var->transp.offset = 15; break; case 16: var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 6; var->blue.length = 5; var->transp.length = 0; var->transp.offset = 0; break; case 24: var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 0; var->transp.offset = 0; break; case 32: var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 8; var->transp.offset = 24; break; default: return -EINVAL; } return 0; } #endif #if 0 /* this will let fbcon do the mode init */ int drm_fb_helper_set_par(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct fb_var_screeninfo *var = &info->var; struct drm_crtc *crtc; int ret; int i; if (var->pixclock != 0) { DRM_ERROR("PIXEL CLOCK SET\n"); return -EINVAL; } mutex_lock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); if (ret) { mutex_unlock(&dev->mode_config.mutex); return ret; } } mutex_unlock(&dev->mode_config.mutex); if (fb_helper->delayed_hotplug) { fb_helper->delayed_hotplug = false; drm_fb_helper_hotplug_event(fb_helper); } return 0; } #endif #if 0 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_mode_set *modeset; struct drm_crtc *crtc; int ret = 0; int i; mutex_lock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; modeset = &fb_helper->crtc_info[i].mode_set; modeset->x = var->xoffset; modeset->y = var->yoffset; if (modeset->num_connectors) { ret = crtc->funcs->set_config(modeset); if (!ret) { info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; } } } mutex_unlock(&dev->mode_config.mutex); return ret; } #endif int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, int preferred_bpp) { int new_fb = 0; int crtc_count = 0; int i; struct fb_info *info; struct drm_fb_helper_surface_size sizes; int gamma_size = 0; struct vt_kms_softc *sc; device_t kdev; memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); sizes.surface_depth = 24; sizes.surface_bpp = 32; sizes.fb_width = 
(unsigned)-1; sizes.fb_height = (unsigned)-1; /* if driver picks 8 or 16 by default use that for both depth/bpp */ if (preferred_bpp != sizes.surface_bpp) { sizes.surface_depth = sizes.surface_bpp = preferred_bpp; } /* first up get a count of crtcs now in use and new min/maxes width/heights */ for (i = 0; i < fb_helper->connector_count; i++) { struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; struct drm_cmdline_mode *cmdline_mode; cmdline_mode = &fb_helper_conn->cmdline_mode; if (cmdline_mode->bpp_specified) { switch (cmdline_mode->bpp) { case 8: sizes.surface_depth = sizes.surface_bpp = 8; break; case 15: sizes.surface_depth = 15; sizes.surface_bpp = 16; break; case 16: sizes.surface_depth = sizes.surface_bpp = 16; break; case 24: sizes.surface_depth = sizes.surface_bpp = 24; break; case 32: sizes.surface_depth = 24; sizes.surface_bpp = 32; break; } break; } } crtc_count = 0; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_display_mode *desired_mode; desired_mode = fb_helper->crtc_info[i].desired_mode; if (desired_mode) { if (gamma_size == 0) gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; if (desired_mode->hdisplay < sizes.fb_width) sizes.fb_width = desired_mode->hdisplay; if (desired_mode->vdisplay < sizes.fb_height) sizes.fb_height = desired_mode->vdisplay; if (desired_mode->hdisplay > sizes.surface_width) sizes.surface_width = desired_mode->hdisplay; if (desired_mode->vdisplay > sizes.surface_height) sizes.surface_height = desired_mode->vdisplay; crtc_count++; } } if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { /* hmm everyone went away - assume VGA cable just fell out and will come back later. */ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); sizes.fb_width = sizes.surface_width = 1024; sizes.fb_height = sizes.surface_height = 768; } /* push down into drivers */ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); if (new_fb < 0) return new_fb; sc = malloc(sizeof(struct vt_kms_softc), DRM_MEM_KMS, M_WAITOK | M_ZERO); sc->fb_helper = fb_helper; TASK_INIT(&sc->fb_mode_task, 0, vt_restore_fbdev_mode, sc); info = fb_helper->fbdev; info->fb_name = device_get_nameunit(fb_helper->dev->device); info->fb_depth = fb_helper->fb->bits_per_pixel; info->fb_height = fb_helper->fb->height; info->fb_width = fb_helper->fb->width; info->fb_stride = fb_helper->fb->pitches[0]; info->fb_priv = sc; info->enter = &vt_kms_postswitch; /* set the fb pointer */ for (i = 0; i < fb_helper->crtc_count; i++) { fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; } if (new_fb) { device_t fbd; int ret; kdev = fb_helper->dev->device; fbd = device_add_child(kdev, "fbd", device_get_unit(kdev)); if (fbd != NULL) ret = device_probe_and_attach(fbd); else ret = ENODEV; #ifdef DEV_VT if (ret != 0) DRM_ERROR("Failed to attach fbd device: %d\n", ret); #endif } return 0; } #if 0 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, uint32_t depth) { info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = depth == 8 ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_trueCOLOR; info->fix.mmio_start = 0; info->fix.mmio_len = 0; info->fix.type_aux = 0; info->fix.xpanstep = 1; /* doing it in hw */ info->fix.ypanstep = 1; /* doing it in hw */ info->fix.ywrapstep = 0; info->fix.accel = FB_ACCEL_NONE; info->fix.type_aux = 0; info->fix.line_length = pitch; return; } void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height) { struct drm_framebuffer *fb = fb_helper->fb; info->pseudo_palette = fb_helper->pseudo_palette; info->var.xres_virtual = fb->width; info->var.yres_virtual = fb->height; info->var.bits_per_pixel = fb->bits_per_pixel; info->var.accel_flags = FB_ACCELF_TEXT; info->var.xoffset = 0; info->var.yoffset = 0; info->var.activate = FB_ACTIVATE_NOW; info->var.height = -1; info->var.width = -1; switch (fb->depth) { case 8: info->var.red.offset = 0; info->var.green.offset = 0; info->var.blue.offset = 0; info->var.red.length = 8; /* 8bit DAC */ info->var.green.length = 8; info->var.blue.length = 8; info->var.transp.offset = 0; info->var.transp.length = 0; break; case 15: info->var.red.offset = 10; info->var.green.offset = 5; info->var.blue.offset = 0; info->var.red.length = 5; info->var.green.length = 5; info->var.blue.length = 5; info->var.transp.offset = 15; info->var.transp.length = 1; break; case 16: info->var.red.offset = 11; info->var.green.offset = 5; info->var.blue.offset = 0; info->var.red.length = 5; info->var.green.length = 6; info->var.blue.length = 5; info->var.transp.offset = 0; break; case 24: info->var.red.offset = 16; info->var.green.offset = 8; info->var.blue.offset = 0; info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; info->var.transp.offset = 0; info->var.transp.length = 0; break; case 32: info->var.red.offset = 16; info->var.green.offset = 8; info->var.blue.offset = 0; info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; info->var.transp.offset = 24; info->var.transp.length = 8; break; default: break; } info->var.xres = fb_width; info->var.yres = fb_height; } #endif static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper, uint32_t maxX, uint32_t maxY) { struct drm_connector *connector; int count = 0; int i; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; count += connector->funcs->fill_modes(connector, maxX, maxY); } return count; } static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) { struct drm_display_mode *mode; list_for_each_entry(mode, &fb_connector->connector->modes, head) { if (drm_mode_width(mode) > width || drm_mode_height(mode) > height) continue; if (mode->type & DRM_MODE_TYPE_PREFERRED) return mode; } return NULL; } static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) { struct drm_cmdline_mode *cmdline_mode; cmdline_mode = &fb_connector->cmdline_mode; return cmdline_mode->specified; } static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, int width, int height) { struct drm_cmdline_mode *cmdline_mode; struct drm_display_mode *mode = NULL; cmdline_mode = &fb_helper_conn->cmdline_mode; if (cmdline_mode->specified == false) return (NULL); /* attempt to find a matching mode in the list of modes * we have gotten so far, if not add a CVT mode that conforms */ if (cmdline_mode->rb || cmdline_mode->margins) goto create_mode; list_for_each_entry(mode, 
&fb_helper_conn->connector->modes, head) { /* check width/height */ if (mode->hdisplay != cmdline_mode->xres || mode->vdisplay != cmdline_mode->yres) continue; if (cmdline_mode->refresh_specified) { if (mode->vrefresh != cmdline_mode->refresh) continue; } if (cmdline_mode->interlace) { if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) continue; } return mode; } create_mode: if (cmdline_mode->cvt) mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres, cmdline_mode->yres, cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, cmdline_mode->rb, cmdline_mode->interlace, cmdline_mode->margins); else mode = drm_gtf_mode(fb_helper_conn->connector->dev, cmdline_mode->xres, cmdline_mode->yres, cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, cmdline_mode->interlace, cmdline_mode->margins); drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); list_add(&mode->head, &fb_helper_conn->connector->modes); return mode; } static bool drm_connector_enabled(struct drm_connector *connector, bool strict) { bool enable; if (strict) { enable = connector->status == connector_status_connected; } else { enable = connector->status != connector_status_disconnected; } return enable; } static void drm_enable_connectors(struct drm_fb_helper *fb_helper, bool *enabled) { bool any_enabled = false; struct drm_connector *connector; int i = 0; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; enabled[i] = drm_connector_enabled(connector, true); DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, enabled[i] ? "yes" : "no"); any_enabled |= enabled[i]; } if (any_enabled) return; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; enabled[i] = drm_connector_enabled(connector, false); } } static bool drm_target_cloned(struct drm_fb_helper *fb_helper, struct drm_display_mode **modes, bool *enabled, int width, int height) { int count, i, j; bool can_clone = false; struct drm_fb_helper_connector *fb_helper_conn; struct drm_display_mode *dmt_mode, *mode; /* only contemplate cloning in the single crtc case */ if (fb_helper->crtc_count > 1) return false; count = 0; for (i = 0; i < fb_helper->connector_count; i++) { if (enabled[i]) count++; } /* only contemplate cloning if more than one connector is enabled */ if (count <= 1) return false; /* check the command line or if nothing common pick 1024x768 */ can_clone = true; for (i = 0; i < fb_helper->connector_count; i++) { if (!enabled[i]) continue; fb_helper_conn = fb_helper->connector_info[i]; modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); if (!modes[i]) { can_clone = false; break; } for (j = 0; j < i; j++) { if (!enabled[j]) continue; if (!drm_mode_equal(modes[j], modes[i])) can_clone = false; } } if (can_clone) { DRM_DEBUG_KMS("can clone using command line\n"); return true; } /* try and find a 1024x768 mode on each connector */ can_clone = true; - dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60); + dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false); for (i = 0; i < fb_helper->connector_count; i++) { if (!enabled[i]) continue; fb_helper_conn = fb_helper->connector_info[i]; list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { if (drm_mode_equal(mode, dmt_mode)) modes[i] = mode; } if (!modes[i]) can_clone = false; } if (can_clone) { DRM_DEBUG_KMS("can clone using 1024x768\n"); return true; } DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); return false; } static 
bool drm_target_preferred(struct drm_fb_helper *fb_helper, struct drm_display_mode **modes, bool *enabled, int width, int height) { struct drm_fb_helper_connector *fb_helper_conn; int i; for (i = 0; i < fb_helper->connector_count; i++) { fb_helper_conn = fb_helper->connector_info[i]; if (enabled[i] == false) continue; DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", fb_helper_conn->connector->base.id); /* got for command line mode first */ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); if (!modes[i]) { DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", fb_helper_conn->connector->base.id); modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); } /* No preferred modes, pick one off the list */ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) { list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head) break; } DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : "none"); } return true; } static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **best_crtcs, struct drm_display_mode **modes, int n, int width, int height) { int c, o; struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; struct drm_connector_helper_funcs *connector_funcs; struct drm_encoder *encoder; struct drm_fb_helper_crtc *best_crtc; int my_score, best_score, score; struct drm_fb_helper_crtc **crtcs, *crtc; struct drm_fb_helper_connector *fb_helper_conn; if (n == fb_helper->connector_count) return 0; fb_helper_conn = fb_helper->connector_info[n]; connector = fb_helper_conn->connector; best_crtcs[n] = NULL; best_crtc = NULL; best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); if (modes[n] == NULL) return best_score; crtcs = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_WAITOK | M_ZERO); my_score = 1; if (connector->status == connector_status_connected) my_score++; if (drm_has_cmdline_mode(fb_helper_conn)) my_score++; if (drm_has_preferred_mode(fb_helper_conn, width, height)) my_score++; connector_funcs = connector->helper_private; encoder = connector_funcs->best_encoder(connector); if (!encoder) goto out; /* select a crtc for this connector and then attempt to configure remaining connectors */ for (c = 0; c < fb_helper->crtc_count; c++) { crtc = &fb_helper->crtc_info[c]; if ((encoder->possible_crtcs & (1 << c)) == 0) { continue; } for (o = 0; o < n; o++) if (best_crtcs[o] == crtc) break; if (o < n) { /* ignore cloning unless only a single crtc */ if (fb_helper->crtc_count > 1) continue; if (!drm_mode_equal(modes[o], modes[n])) continue; } crtcs[n] = crtc; memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *)); score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, width, height); if (score > best_score) { best_crtc = crtc; best_score = score; memcpy(best_crtcs, crtcs, dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *)); } } out: free(crtcs, DRM_MEM_KMS); return best_score; } static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_fb_helper_crtc **crtcs; struct drm_display_mode **modes; struct drm_encoder *encoder; struct drm_mode_set *modeset; bool *enabled; int width, height; int i, ret; DRM_DEBUG_KMS("\n"); width = dev->mode_config.max_width; height = dev->mode_config.max_height; /* clean out all the encoder/crtc combos */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { encoder->crtc = NULL; } crtcs = 
malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_WAITOK | M_ZERO); modes = malloc(dev->mode_config.num_connector * sizeof(struct drm_display_mode *), DRM_MEM_KMS, M_WAITOK | M_ZERO); enabled = malloc(dev->mode_config.num_connector * sizeof(bool), DRM_MEM_KMS, M_WAITOK | M_ZERO); drm_enable_connectors(fb_helper, enabled); ret = drm_target_cloned(fb_helper, modes, enabled, width, height); if (!ret) { ret = drm_target_preferred(fb_helper, modes, enabled, width, height); if (!ret) DRM_ERROR("Unable to find initial modes\n"); } DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); /* need to set the modesets up here for use later */ /* fill out the connector<->crtc mappings into the modesets */ for (i = 0; i < fb_helper->crtc_count; i++) { modeset = &fb_helper->crtc_info[i].mode_set; modeset->num_connectors = 0; } for (i = 0; i < fb_helper->connector_count; i++) { struct drm_display_mode *mode = modes[i]; struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; modeset = &fb_crtc->mode_set; if (mode && fb_crtc) { DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", mode->name, fb_crtc->mode_set.crtc->base.id); fb_crtc->desired_mode = mode; if (modeset->mode) drm_mode_destroy(dev, modeset->mode); modeset->mode = drm_mode_duplicate(dev, fb_crtc->desired_mode); modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; } } free(crtcs, DRM_MEM_KMS); free(modes, DRM_MEM_KMS); free(enabled, DRM_MEM_KMS); } /** * drm_helper_initial_config - setup a sane initial connector configuration * @dev: DRM device * * LOCKING: * Called at init time, must take mode config lock. * * Scan the CRTCs and connectors and try to put together an initial setup. * At the moment, this is a cloned configuration across all heads with * a new framebuffer object as the backing store. * * RETURNS: * Zero if everything went ok, nonzero otherwise. */ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) { struct drm_device *dev = fb_helper->dev; int count = 0; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(fb_helper->dev); drm_fb_helper_parse_command_line(fb_helper); count = drm_fb_helper_probe_connector_modes(fb_helper, dev->mode_config.max_width, dev->mode_config.max_height); /* * we shouldn't end up with no modes here. 
*/ if (count == 0) { printf("No connectors reported connected with modes\n"); } drm_setup_crtcs(fb_helper); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; int count = 0; u32 max_width, max_height, bpp_sel; bool bound = false, crtcs_bound = false; struct drm_crtc *crtc; if (!fb_helper->fb) return 0; sx_xlock(&dev->mode_config.mutex); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb) crtcs_bound = true; if (crtc->fb == fb_helper->fb) bound = true; } if (!bound && crtcs_bound) { fb_helper->delayed_hotplug = true; sx_xunlock(&dev->mode_config.mutex); return 0; } DRM_DEBUG_KMS("\n"); max_width = fb_helper->fb->width; max_height = fb_helper->fb->height; bpp_sel = fb_helper->fb->bits_per_pixel; count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); drm_setup_crtcs(fb_helper); sx_xunlock(&dev->mode_config.mutex); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } Index: head/sys/dev/drm2/drm_ioctl.c =================================================================== --- head/sys/dev/drm2/drm_ioctl.c (revision 277486) +++ head/sys/dev/drm2/drm_ioctl.c (revision 277487) @@ -1,320 +1,324 @@ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); /** @file drm_ioctl.c * Varios minor DRM ioctls not applicable to other files, such as versioning * information and reporting DRM information to userland. */ #include #include /* * Beginning in revision 1.1 of the DRM interface, getunique will return * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function) * before setunique has been called. The format for the bus-specific part of * the unique is not defined for any other bus. */ int drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_unique *u = data; if (u->unique_len >= dev->unique_len) { if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len)) return EFAULT; } u->unique_len = dev->unique_len; return 0; } /* Deprecated in DRM version 1.1, and will return EBUSY when setversion has * requested version 1.1 or greater. 
*/ int drm_setunique(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_unique *u = data; int domain, bus, slot, func, ret; char *busid; /* Check and copy in the submitted Bus ID */ if (!u->unique_len || u->unique_len > 1024) return EINVAL; busid = malloc(u->unique_len + 1, DRM_MEM_DRIVER, M_WAITOK); if (busid == NULL) return ENOMEM; if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) { free(busid, DRM_MEM_DRIVER); return EFAULT; } busid[u->unique_len] = '\0'; /* Return error if the busid submitted doesn't match the device's actual * busid. */ ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func); if (ret != 3) { free(busid, DRM_MEM_DRIVER); return EINVAL; } domain = bus >> 8; bus &= 0xff; if ((domain != dev->pci_domain) || (bus != dev->pci_bus) || (slot != dev->pci_slot) || (func != dev->pci_func)) { free(busid, DRM_MEM_DRIVER); return EINVAL; } /* Actually set the device's busid now. */ DRM_LOCK(dev); if (dev->unique_len || dev->unique) { DRM_UNLOCK(dev); return EBUSY; } dev->unique_len = u->unique_len; dev->unique = busid; DRM_UNLOCK(dev); return 0; } static int drm_set_busid(struct drm_device *dev) { DRM_LOCK(dev); if (dev->unique != NULL) { DRM_UNLOCK(dev); return EBUSY; } dev->unique_len = 20; dev->unique = malloc(dev->unique_len + 1, DRM_MEM_DRIVER, M_NOWAIT); if (dev->unique == NULL) { DRM_UNLOCK(dev); return ENOMEM; } snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%1x", dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); DRM_UNLOCK(dev); return 0; } int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_map *map = data; drm_local_map_t *mapinlist; int idx; int i = 0; idx = map->offset; DRM_LOCK(dev); if (idx < 0) { DRM_UNLOCK(dev); return EINVAL; } TAILQ_FOREACH(mapinlist, &dev->maplist, link) { if (i == idx) { map->offset = mapinlist->offset; map->size = mapinlist->size; map->type = mapinlist->type; map->flags = mapinlist->flags; map->handle = mapinlist->handle; map->mtrr = mapinlist->mtrr; break; } i++; } DRM_UNLOCK(dev); if (mapinlist == NULL) return EINVAL; return 0; } int drm_getclient(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_client *client = data; struct drm_file *pt; int idx; int i = 0; idx = client->idx; DRM_LOCK(dev); TAILQ_FOREACH(pt, &dev->files, link) { if (i == idx) { client->auth = pt->authenticated; client->pid = pt->pid; client->uid = pt->uid; client->magic = pt->magic; client->iocs = pt->ioctl_count; DRM_UNLOCK(dev); return 0; } i++; } DRM_UNLOCK(dev); return EINVAL; } int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_stats *stats = data; int i; memset(stats, 0, sizeof(struct drm_stats)); DRM_LOCK(dev); for (i = 0; i < dev->counters; i++) { if (dev->types[i] == _DRM_STAT_LOCK) stats->data[i].value = (dev->lock.hw_lock ? 
dev->lock.hw_lock->lock : 0); else stats->data[i].value = atomic_read(&dev->counts[i]); stats->data[i].type = dev->types[i]; } stats->count = dev->counters; DRM_UNLOCK(dev); return 0; } int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_get_cap *req = data; req->value = 0; switch (req->capability) { case DRM_CAP_DUMB_BUFFER: if (dev->driver->dumb_create) req->value = 1; break; case DRM_CAP_VBLANK_HIGH_CRTC: req->value = 1; break; case DRM_CAP_DUMB_PREFERRED_DEPTH: req->value = dev->mode_config.preferred_depth; break; case DRM_CAP_DUMB_PREFER_SHADOW: req->value = dev->mode_config.prefer_shadow; break; + case DRM_CAP_PRIME: + req->value |= false /* XXXKIB dev->driver->prime_fd_to_handle */ ? DRM_PRIME_CAP_IMPORT : 0; + req->value |= false /* XXXKIB dev->driver->prime_handle_to_fd */ ? DRM_PRIME_CAP_EXPORT : 0; + break; case DRM_CAP_TIMESTAMP_MONOTONIC: req->value = drm_timestamp_monotonic; break; default: return EINVAL; } return 0; } int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_set_version *sv = data; struct drm_set_version ver; int if_version; /* Save the incoming data, and set the response before continuing * any further. */ ver = *sv; sv->drm_di_major = DRM_IF_MAJOR; sv->drm_di_minor = DRM_IF_MINOR; sv->drm_dd_major = dev->driver->major; sv->drm_dd_minor = dev->driver->minor; DRM_DEBUG("ver.drm_di_major %d ver.drm_di_minor %d " "ver.drm_dd_major %d ver.drm_dd_minor %d\n", ver.drm_di_major, ver.drm_di_minor, ver.drm_dd_major, ver.drm_dd_minor); DRM_DEBUG("sv->drm_di_major %d sv->drm_di_minor %d " "sv->drm_dd_major %d sv->drm_dd_minor %d\n", sv->drm_di_major, sv->drm_di_minor, sv->drm_dd_major, sv->drm_dd_minor); if (ver.drm_di_major != -1) { if (ver.drm_di_major != DRM_IF_MAJOR || ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) { return EINVAL; } if_version = DRM_IF_VERSION(ver.drm_di_major, ver.drm_dd_minor); dev->if_version = DRM_MAX(if_version, dev->if_version); if (ver.drm_di_minor >= 1) { /* * Version 1.1 includes tying of DRM to specific device */ drm_set_busid(dev); } } if (ver.drm_dd_major != -1) { if (ver.drm_dd_major != dev->driver->major || ver.drm_dd_minor < 0 || ver.drm_dd_minor > dev->driver->minor) { return EINVAL; } } return 0; } int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv) { DRM_DEBUG("\n"); return 0; } Index: head/sys/dev/drm2/drm_irq.c =================================================================== --- head/sys/dev/drm2/drm_irq.c (revision 277486) +++ head/sys/dev/drm2/drm_irq.c (revision 277487) @@ -1,1253 +1,1249 @@ /*- * Copyright 2003 Eric Anholt * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
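For the drm_getcap() handler above, a hedged userspace-side sketch of a capability query. The two-field request/reply shape is taken from the handler; the exact struct layout and the DRM_CAP_DUMB_BUFFER value mirror the usual drm.h definitions and are assumptions here, and the ioctl request code is deliberately passed in as a parameter rather than guessed.

#include <stdint.h>
#include <sys/ioctl.h>

/* Assumed to match the drm.h layout: capability in, value out. */
struct drm_get_cap {
	uint64_t capability;
	uint64_t value;
};

#define DRM_CAP_DUMB_BUFFER	0x1	/* assumed value, for illustration */

/* Returns non-zero if the driver reports dumb-buffer support.
 * "get_cap_ioctl" stands in for the real DRM_IOCTL_GET_CAP code. */
static int
has_dumb_buffers(int fd, unsigned long get_cap_ioctl)
{
	struct drm_get_cap req = { .capability = DRM_CAP_DUMB_BUFFER };

	if (ioctl(fd, get_cap_ioctl, &req) != 0)
		return (0);
	return (req.value != 0);
}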
IN NO EVENT SHALL * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt * */ #include __FBSDID("$FreeBSD$"); /** @file drm_irq.c * Support code for handling setup/teardown of interrupt handlers and * handing interrupt handlers off to the drivers. */ #include #include MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data"); /* Access macro for slots in vblank timestamp ringbuffer. */ #define vblanktimestamp(dev, crtc, count) ( \ (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \ ((count) % DRM_VBLANKTIME_RBSIZE)]) /* Retry timestamp calculation up to 3 times to satisfy * drm_timestamp_precision before giving up. */ #define DRM_TIMESTAMP_MAXRETRIES 3 /* Threshold in nanoseconds for detection of redundant * vblank irq in drm_handle_vblank(). 1 msec should be ok. */ #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_irq_busid *irq = data; if ((irq->busnum >> 8) != dev->pci_domain || (irq->busnum & 0xff) != dev->pci_bus || irq->devnum != dev->pci_slot || irq->funcnum != dev->pci_func) return EINVAL; irq->irq = dev->irq; DRM_DEBUG("%d:%d:%d => IRQ %d\n", irq->busnum, irq->devnum, irq->funcnum, irq->irq); return 0; } static void drm_irq_handler_wrap(void *arg) { struct drm_device *dev = arg; mtx_lock(&dev->irq_lock); dev->driver->irq_handler(arg); mtx_unlock(&dev->irq_lock); } int drm_irq_install(struct drm_device *dev) { int retcode; if (dev->irq == 0 || dev->dev_private == NULL) return (EINVAL); DRM_DEBUG("irq=%d\n", dev->irq); DRM_LOCK(dev); if (dev->irq_enabled) { DRM_UNLOCK(dev); return EBUSY; } dev->irq_enabled = 1; dev->context_flag = 0; /* Before installing handler */ if (dev->driver->irq_preinstall) dev->driver->irq_preinstall(dev); DRM_UNLOCK(dev); /* Install handler */ retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (dev->driver->driver_features & DRIVER_LOCKLESS_IRQ) != 0 ? drm_irq_handler_wrap : dev->driver->irq_handler, dev, &dev->irqh); if (retcode != 0) goto err; /* After installing handler */ DRM_LOCK(dev); if (dev->driver->irq_postinstall) dev->driver->irq_postinstall(dev); DRM_UNLOCK(dev); return (0); err: device_printf(dev->device, "Error setting interrupt: %d\n", retcode); dev->irq_enabled = 0; return (retcode); } int drm_irq_uninstall(struct drm_device *dev) { int i; if (!dev->irq_enabled) return EINVAL; dev->irq_enabled = 0; /* * Wake up any waiters so they don't hang. */ if (dev->num_crtcs) { mtx_lock(&dev->vbl_lock); for (i = 0; i < dev->num_crtcs; i++) { wakeup(&dev->_vblank_count[i]); dev->vblank_enabled[i] = 0; dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); } mtx_unlock(&dev->vbl_lock); } DRM_DEBUG("irq=%d\n", dev->irq); if (dev->driver->irq_uninstall) dev->driver->irq_uninstall(dev); DRM_UNLOCK(dev); bus_teardown_intr(dev->device, dev->irqr, dev->irqh); DRM_LOCK(dev); return 0; } int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_control *ctl = data; int err; switch (ctl->func) { case DRM_INST_HANDLER: /* Handle drivers whose DRM used to require IRQ setup but the * no longer does. 
*/ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && ctl->irq != dev->irq) return EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; DRM_LOCK(dev); err = drm_irq_uninstall(dev); DRM_UNLOCK(dev); return err; default: return EINVAL; } } #define NSEC_PER_USEC 1000L #define NSEC_PER_SEC 1000000000L int64_t timeval_to_ns(const struct timeval *tv) { return ((int64_t)tv->tv_sec * NSEC_PER_SEC) + tv->tv_usec * NSEC_PER_USEC; } struct timeval ns_to_timeval(const int64_t nsec) { struct timeval tv; long rem; if (nsec == 0) { tv.tv_sec = 0; tv.tv_usec = 0; return (tv); } tv.tv_sec = nsec / NSEC_PER_SEC; rem = nsec % NSEC_PER_SEC; if (rem < 0) { tv.tv_sec--; rem += NSEC_PER_SEC; } tv.tv_usec = rem / 1000; return (tv); } /* * Clear vblank timestamp buffer for a crtc. */ static void clear_vblank_timestamps(struct drm_device *dev, int crtc) { memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0, DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval)); } static int64_t abs64(int64_t x) { return (x < 0 ? -x : x); } /* * Disable vblank irq's on crtc, make sure that last vblank count * of hardware and corresponding consistent software vblank counter * are preserved, even if there are any spurious vblank irq's after * disable. */ static void vblank_disable_and_save(struct drm_device *dev, int crtc) { u32 vblcount; int64_t diff_ns; int vblrc; struct timeval tvblank; /* Prevent vblank irq processing while disabling vblank irqs, * so no updates of timestamps or count can happen after we've * disabled. Needed to prevent races in case of delayed irq's. */ mtx_lock(&dev->vblank_time_lock); dev->driver->disable_vblank(dev, crtc); dev->vblank_enabled[crtc] = 0; /* No further vblank irq's will be processed after * this point. Get current hardware vblank count and * vblank timestamp, repeat until they are consistent. * * FIXME: There is still a race condition here and in * drm_update_vblank_count() which can cause off-by-one * reinitialization of software vblank counter. If gpu * vblank counter doesn't increment exactly at the leading * edge of a vblank interval, then we can lose 1 count if * we happen to execute between start of vblank and the * delayed gpu counter increment. */ do { dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc)); /* Compute time difference to stored timestamp of last vblank * as updated by last invocation of drm_handle_vblank() in vblank irq. */ vblcount = atomic_read(&dev->_vblank_count[crtc]); diff_ns = timeval_to_ns(&tvblank) - timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); /* If there is at least 1 msec difference between the last stored * timestamp and tvblank, then we are currently executing our * disable inside a new vblank interval, the tvblank timestamp * corresponds to this new vblank interval and the irq handler * for this vblank didn't run yet and won't run due to our disable. * Therefore we need to do the job of drm_handle_vblank() and * increment the vblank counter by one to account for this vblank. * * Skip this step if there isn't any high precision timestamp * available. In that case we can't account for this and just * hope for the best. 
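The timeval_to_ns()/ns_to_timeval() helpers above underpin all of the timestamp arithmetic in this file. A self-contained sketch of the same conversions (re-declared locally so it compiles on its own), showing how a negative remainder is folded back into a valid timeval:

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

#define NSEC_PER_USEC	1000L
#define NSEC_PER_SEC	1000000000L

static int64_t
tv_to_ns(const struct timeval *tv)
{
	return (((int64_t)tv->tv_sec * NSEC_PER_SEC) +
	    tv->tv_usec * NSEC_PER_USEC);
}

static struct timeval
ns_to_tv(int64_t nsec)
{
	struct timeval tv;
	long rem;

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {		/* keep tv_usec non-negative */
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}

int
main(void)
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 500000 };
	int64_t ns = tv_to_ns(&tv);			/* 1500000000 */
	struct timeval back = ns_to_tv(ns - 2 * NSEC_PER_SEC);

	/* -0.5 s folds to tv_sec = -1, tv_usec = 500000 */
	printf("%jd -> %jd.%06ld\n", (intmax_t)ns,
	    (intmax_t)back.tv_sec, (long)back.tv_usec);
	return (0);
}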
*/ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { atomic_inc(&dev->_vblank_count[crtc]); } /* Invalidate all timestamps while vblank irq's are off. */ clear_vblank_timestamps(dev, crtc); mtx_unlock(&dev->vblank_time_lock); } static void vblank_disable_fn(void * arg) { struct drm_device *dev = (struct drm_device *)arg; int i; if (!dev->vblank_disable_allowed) return; for (i = 0; i < dev->num_crtcs; i++) { mtx_lock(&dev->vbl_lock); if (atomic_read(&dev->vblank_refcount[i]) == 0 && dev->vblank_enabled[i]) { DRM_DEBUG("disabling vblank on crtc %d\n", i); vblank_disable_and_save(dev, i); } mtx_unlock(&dev->vbl_lock); } } void drm_vblank_cleanup(struct drm_device *dev) { /* Bail if the driver didn't call drm_vblank_init() */ if (dev->num_crtcs == 0) return; callout_stop(&dev->vblank_disable_callout); vblank_disable_fn(dev); free(dev->_vblank_count, DRM_MEM_VBLANK); free(dev->vblank_refcount, DRM_MEM_VBLANK); free(dev->vblank_enabled, DRM_MEM_VBLANK); free(dev->last_vblank, DRM_MEM_VBLANK); free(dev->last_vblank_wait, DRM_MEM_VBLANK); free(dev->vblank_inmodeset, DRM_MEM_VBLANK); free(dev->_vblank_time, DRM_MEM_VBLANK); dev->num_crtcs = 0; } int drm_vblank_init(struct drm_device *dev, int num_crtcs) { int i; callout_init(&dev->vblank_disable_callout, CALLOUT_MPSAFE); #if 0 mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF); #endif mtx_init(&dev->vblank_time_lock, "drmvtl", NULL, MTX_DEF); dev->num_crtcs = num_crtcs; dev->_vblank_count = malloc(sizeof(atomic_t) * num_crtcs, DRM_MEM_VBLANK, M_WAITOK); dev->vblank_refcount = malloc(sizeof(atomic_t) * num_crtcs, DRM_MEM_VBLANK, M_WAITOK); dev->vblank_enabled = malloc(num_crtcs * sizeof(int), DRM_MEM_VBLANK, M_WAITOK | M_ZERO); dev->last_vblank = malloc(num_crtcs * sizeof(u32), DRM_MEM_VBLANK, M_WAITOK | M_ZERO); dev->last_vblank_wait = malloc(num_crtcs * sizeof(u32), DRM_MEM_VBLANK, M_WAITOK | M_ZERO); dev->vblank_inmodeset = malloc(num_crtcs * sizeof(int), DRM_MEM_VBLANK, M_WAITOK | M_ZERO); dev->_vblank_time = malloc(num_crtcs * DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval), DRM_MEM_VBLANK, M_WAITOK | M_ZERO); DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); /* Driver specific high-precision vblank timestamping supported? */ if (dev->driver->get_vblank_timestamp) DRM_INFO("Driver supports precise vblank timestamp query.\n"); else DRM_INFO("No driver support for vblank timestamp query.\n"); /* Zero per-crtc vblank stuff */ for (i = 0; i < num_crtcs; i++) { atomic_set(&dev->_vblank_count[i], 0); atomic_set(&dev->vblank_refcount[i], 0); } dev->vblank_disable_allowed = 0; return 0; } void drm_calc_timestamping_constants(struct drm_crtc *crtc) { int64_t linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0; uint64_t dotclock; /* Dot clock in Hz: */ dotclock = (uint64_t) crtc->hwmode.clock * 1000; /* Fields of interlaced scanout modes are only halve a frame duration. * Double the dotclock to get halve the frame-/line-/pixelduration. */ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) dotclock *= 2; /* Valid dotclock? 
*/ if (dotclock > 0) { /* Convert scanline length in pixels and video dot clock to * line duration, frame duration and pixel duration in * nanoseconds: */ pixeldur_ns = (int64_t)1000000000 / dotclock; linedur_ns = ((uint64_t)crtc->hwmode.crtc_htotal * 1000000000) / dotclock; framedur_ns = (int64_t)crtc->hwmode.crtc_vtotal * linedur_ns; } else DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", crtc->base.id); crtc->pixeldur_ns = pixeldur_ns; crtc->linedur_ns = linedur_ns; crtc->framedur_ns = framedur_ns; DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", crtc->base.id, crtc->hwmode.crtc_htotal, crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay); DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", crtc->base.id, (int) dotclock/1000, (int) framedur_ns, (int) linedur_ns, (int) pixeldur_ns); } /** * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms * drivers. Implements calculation of exact vblank timestamps from * given drm_display_mode timings and current video scanout position * of a crtc. This can be called from within get_vblank_timestamp() * implementation of a kms driver to implement the actual timestamping. * * Should return timestamps conforming to the OML_sync_control OpenML * extension specification. The timestamp corresponds to the end of * the vblank interval, aka start of scanout of topmost-leftmost display * pixel in the following video frame. * * Requires support for optional dev->driver->get_scanout_position() * in kms driver, plus a bit of setup code to provide a drm_display_mode * that corresponds to the true scanout timing. * * The current implementation only handles standard video modes. It * returns as no operation if a doublescan or interlaced video mode is * active. Higher level code is expected to handle this. * * @dev: DRM device. * @crtc: Which crtc's vblank timestamp to retrieve. * @max_error: Desired maximum allowable error in timestamps (nanosecs). * On return contains true maximum error of timestamp. * @vblank_time: Pointer to struct timeval which should receive the timestamp. * @flags: Flags to pass to driver: * 0 = Default. * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. * @refcrtc: drm_crtc* of crtc which defines scanout timing. * * Returns negative value on error, failure or if not supported in current * video mode: * * -EINVAL - Invalid crtc. * -EAGAIN - Temporary unavailable, e.g., called before initial modeset. * -ENOTSUPP - Function not supported in current display mode. * -EIO - Failed, e.g., due to failed scanout position query. * * Returns or'ed positive status flags on success: * * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping. * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval. * */ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, int *max_error, struct timeval *vblank_time, unsigned flags, struct drm_crtc *refcrtc) { struct timeval stime, raw_time; struct drm_display_mode *mode; int vbl_status, vtotal, vdisplay; int vpos, hpos, i; int64_t framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; bool invbl; if (crtc < 0 || crtc >= dev->num_crtcs) { DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; } /* Scanout position query not supported? Should not happen. 
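A standalone sketch of the duration arithmetic in drm_calc_timestamping_constants() above, using nominal 1920x1080@60 timings (148.5 MHz dot clock, 2200x1125 total) purely as example numbers:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t dotclock = (uint64_t)148500 * 1000;	/* kHz -> Hz */
	int crtc_htotal = 2200, crtc_vtotal = 1125;
	int64_t pixeldur_ns, linedur_ns, framedur_ns;

	pixeldur_ns = (int64_t)1000000000 / dotclock;
	linedur_ns = ((uint64_t)crtc_htotal * 1000000000) / dotclock;
	framedur_ns = (int64_t)crtc_vtotal * linedur_ns;

	/* prints roughly 6 ns, 14814 ns, 16665750 ns (~60 Hz frame) */
	printf("pixel %jd ns, line %jd ns, frame %jd ns\n",
	    (intmax_t)pixeldur_ns, (intmax_t)linedur_ns,
	    (intmax_t)framedur_ns);
	return (0);
}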
*/ if (!dev->driver->get_scanout_position) { DRM_ERROR("Called from driver w/o get_scanout_position()!?\n"); return -EIO; } mode = &refcrtc->hwmode; vtotal = mode->crtc_vtotal; vdisplay = mode->crtc_vdisplay; /* Durations of frames, lines, pixels in nanoseconds. */ framedur_ns = refcrtc->framedur_ns; linedur_ns = refcrtc->linedur_ns; pixeldur_ns = refcrtc->pixeldur_ns; /* If mode timing undefined, just return as no-op: * Happens during initial modesetting of a crtc. */ if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) { DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); return -EAGAIN; } /* Get current scanout position with system timestamp. * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times * if single query takes longer than max_error nanoseconds. * * This guarantees a tight bound on maximum error if * code gets preempted or delayed for some reason. */ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { /* Disable preemption to make it very likely to * succeed in the first iteration. */ critical_enter(); /* Get system timestamp before query. */ getmicrouptime(&stime); /* Get vertical and horizontal scanout pos. vpos, hpos. */ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos); /* Get system timestamp after query. */ getmicrouptime(&raw_time); critical_exit(); /* Return as no-op if scanout query unsupported or failed. */ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", crtc, vbl_status); return -EIO; } duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime); /* Accept result with < max_error nsecs timing uncertainty. */ if (duration_ns <= (int64_t) *max_error) break; } /* Noisy system timing? */ if (i == DRM_TIMESTAMP_MAXRETRIES) { DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", crtc, (int) duration_ns/1000, *max_error/1000, i); } /* Return upper bound of timestamp precision error. */ *max_error = (int) duration_ns; /* Check if in vblank area: * vpos is >=0 in video scanout area, but negative * within vblank area, counting down the number of lines until * start of scanout. */ invbl = vbl_status & DRM_SCANOUTPOS_INVBL; /* Convert scanout position into elapsed time at raw_time query * since start of scanout at first display scanline. delta_ns * can be negative if start of scanout hasn't happened yet. */ delta_ns = (int64_t)vpos * linedur_ns + (int64_t)hpos * pixeldur_ns; /* Is vpos outside nominal vblank area, but less than * 1/100 of a frame height away from start of vblank? * If so, assume this isn't a massively delayed vblank * interrupt, but a vblank interrupt that fired a few * microseconds before true start of vblank. Compensate * by adding a full frame duration to the final timestamp. * Happens, e.g., on ATI R500, R600. * * We only do this if DRM_CALLED_FROM_VBLIRQ. */ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl && ((vdisplay - vpos) < vtotal / 100)) { delta_ns = delta_ns - framedur_ns; /* Signal this correction as "applied". */ vbl_status |= 0x8; } /* Subtract time delta from raw timestamp to get final * vblank_time timestamp for end of vblank. 
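A small sketch of the delta_ns correction described above: the time already spent scanning out the current frame is reconstructed from the line/pixel durations and subtracted from the query timestamp, dating the result back to the end of vblank. All numbers are hypothetical, roughly 1080p-scale.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t linedur_ns = 14814, pixeldur_ns = 6;
	int vpos = 300, hpos = 100;		/* somewhere mid-frame */
	int64_t raw_ns = 1000000000;		/* system time of the query */
	int64_t delta_ns, vblank_ns;

	/* time elapsed since scanout of the first visible pixel */
	delta_ns = (int64_t)vpos * linedur_ns + (int64_t)hpos * pixeldur_ns;

	/* subtracting it places the timestamp at the end of vblank */
	vblank_ns = raw_ns - delta_ns;

	printf("delta %jd ns, vblank at %jd ns\n",
	    (intmax_t)delta_ns, (intmax_t)vblank_ns);
	return (0);
}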
*/ *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %jd.%jd -> %jd.%jd [e %d us, %d rep]\n", crtc, (int)vbl_status, hpos, vpos, (uintmax_t)raw_time.tv_sec, (uintmax_t)raw_time.tv_usec, (uintmax_t)vblank_time->tv_sec, (uintmax_t)vblank_time->tv_usec, (int)duration_ns/1000, i); vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; if (invbl) vbl_status |= DRM_VBLANKTIME_INVBL; return vbl_status; } /** * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent * vblank interval. * * @dev: DRM device * @crtc: which crtc's vblank timestamp to retrieve * @tvblank: Pointer to target struct timeval which should receive the timestamp * @flags: Flags to pass to driver: * 0 = Default. * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. * * Fetches the system timestamp corresponding to the time of the most recent * vblank interval on specified crtc. May call into kms-driver to * compute the timestamp with a high-precision GPU specific method. * * Returns zero if timestamp originates from uncorrected do_gettimeofday() * call, i.e., it isn't very precisely locked to the true vblank. * * Returns non-zero if timestamp is considered to be very precise. */ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, struct timeval *tvblank, unsigned flags) { - int ret = 0; + int ret; /* Define requested maximum error on timestamps (nanoseconds). */ int max_error = (int) drm_timestamp_precision * 1000; /* Query driver if possible and precision timestamping enabled. */ if (dev->driver->get_vblank_timestamp && (max_error > 0)) { ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, tvblank, flags); if (ret > 0) return (u32) ret; } /* GPU high precision timestamp query unsupported or failed. * Return gettimeofday timestamp as best estimate. */ microtime(tvblank); return 0; } /** * drm_vblank_count - retrieve "cooked" vblank counter value * @dev: DRM device * @crtc: which counter to retrieve * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to * modesetting activity. */ u32 drm_vblank_count(struct drm_device *dev, int crtc) { return atomic_read(&dev->_vblank_count[crtc]); } /** * drm_vblank_count_and_time - retrieve "cooked" vblank counter value * and the system timestamp corresponding to that vblank counter value. * * @dev: DRM device * @crtc: which counter to retrieve * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current value vblank counter * value. */ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, struct timeval *vblanktime) { u32 cur_vblank; /* Read timestamp from slot of _vblank_time ringbuffer * that corresponds to current vblank count. Retry if * count has incremented during readout. This works like * a seqlock. 
*/ do { cur_vblank = atomic_read(&dev->_vblank_count[crtc]); *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); rmb(); } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc])); return cur_vblank; } /** * drm_update_vblank_count - update the master vblank counter * @dev: DRM device * @crtc: counter to update * * Call back into the driver to update the appropriate vblank counter * (specified by @crtc). Deal with wraparound, if it occurred, and * update the last read value so we can deal with wraparound on the next * call if necessary. * * Only necessary when going from off->on, to account for frames we * didn't get an interrupt for. * * Note: caller must hold dev->vbl_lock since this reads & writes * device vblank fields. */ static void drm_update_vblank_count(struct drm_device *dev, int crtc) { u32 cur_vblank, diff, tslot, rc; struct timeval t_vblank; /* * Interrupts were disabled prior to this call, so deal with counter * wrap if needed. * NOTE! It's possible we lost a full dev->max_vblank_count events * here if the register is small or we had vblank interrupts off for * a long time. * * We repeat the hardware vblank counter & timestamp query until * we get consistent results. This to prevent races between gpu * updating its hardware counter while we are retrieving the * corresponding vblank timestamp. */ do { cur_vblank = dev->driver->get_vblank_counter(dev, crtc); rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0); } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); /* Deal with counter wrap */ diff = cur_vblank - dev->last_vblank[crtc]; if (cur_vblank < dev->last_vblank[crtc]) { diff += dev->max_vblank_count; DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", crtc, dev->last_vblank[crtc], cur_vblank, diff); } DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", crtc, diff); /* Reinitialize corresponding vblank timestamp if high-precision query * available. Skip this step if query unsupported or failed. Will * reinitialize delayed at next vblank interrupt in that case. */ if (rc) { tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; vblanktimestamp(dev, crtc, tslot) = t_vblank; } atomic_add(diff, &dev->_vblank_count[crtc]); } /** * drm_vblank_get - get a reference count on vblank events * @dev: DRM device * @crtc: which CRTC to own * * Acquire a reference count on vblank events to avoid having them disabled * while in use. * * RETURNS * Zero on success, nonzero on failure. */ int drm_vblank_get(struct drm_device *dev, int crtc) { int ret = 0; mtx_lock(&dev->vbl_lock); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { mtx_lock(&dev->vblank_time_lock); if (!dev->vblank_enabled[crtc]) { /* Enable vblank irqs under vblank_time_lock protection. * All vblank count & timestamp updates are held off * until we are done reinitializing master counter and * timestamps. Filtercode in drm_handle_vblank() will * prevent double-accounting of same vblank interval. 
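A standalone sketch of the counter-wrap handling in drm_update_vblank_count() above, assuming a hypothetical 24-bit hardware frame counter:

#include <stdint.h>
#include <stdio.h>

static uint32_t
vblank_diff(uint32_t cur, uint32_t last, uint32_t max_vblank_count)
{
	uint32_t diff = cur - last;	/* unsigned, may underflow */

	if (cur < last)			/* hardware counter wrapped */
		diff += max_vblank_count;
	return (diff);
}

int
main(void)
{
	/* counter wrapped from 0xfffffe to 0x000003: 5 frames elapsed */
	printf("%u\n", vblank_diff(0x000003, 0xfffffe, 1 << 24));
	return (0);
}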
*/ ret = -dev->driver->enable_vblank(dev, crtc); DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); if (ret) atomic_dec(&dev->vblank_refcount[crtc]); else { dev->vblank_enabled[crtc] = 1; drm_update_vblank_count(dev, crtc); } } mtx_unlock(&dev->vblank_time_lock); } else { if (!dev->vblank_enabled[crtc]) { atomic_dec(&dev->vblank_refcount[crtc]); ret = EINVAL; } } mtx_unlock(&dev->vbl_lock); return ret; } /** * drm_vblank_put - give up ownership of vblank events * @dev: DRM device * @crtc: which counter to give up * * Release ownership of a given vblank counter, turning off interrupts * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. */ void drm_vblank_put(struct drm_device *dev, int crtc) { KASSERT(atomic_read(&dev->vblank_refcount[crtc]) != 0, ("Too many drm_vblank_put for crtc %d", crtc)); /* Last user schedules interrupt disable */ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && (drm_vblank_offdelay > 0)) callout_reset(&dev->vblank_disable_callout, (drm_vblank_offdelay * DRM_HZ) / 1000, vblank_disable_fn, dev); } void drm_vblank_off(struct drm_device *dev, int crtc) { struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned int seq; mtx_lock(&dev->vbl_lock); vblank_disable_and_save(dev, crtc); mtx_lock(&dev->event_lock); wakeup(&dev->_vblank_count[crtc]); /* Send any queued vblank events, lest the natives grow disquiet */ seq = drm_vblank_count_and_time(dev, crtc, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != crtc) continue; DRM_DEBUG("Sending premature vblank event on disable: \ wanted %d, current %d\n", e->event.sequence, seq); e->event.sequence = seq; e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, e->pipe); list_move_tail(&e->base.link, &e->base.file_priv->event_list); drm_event_wakeup(&e->base); CTR3(KTR_DRM, "vblank_event_delivered %d %d %d", e->base.pid, e->pipe, e->event.sequence); } mtx_unlock(&dev->event_lock); mtx_unlock(&dev->vbl_lock); } /** * drm_vblank_pre_modeset - account for vblanks across mode sets * @dev: DRM device * @crtc: CRTC in question * @post: post or pre mode set? * * Account for vblank events across mode setting events, which will likely * reset the hardware frame counter. */ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) { /* vblank is not initialized (IRQ not installed ?) */ if (!dev->num_crtcs) return; /* * To avoid all the problems that might happen if interrupts * were enabled/disabled around or between these calls, we just * have the kernel take a reference on the CRTC (just once though * to avoid corrupting the count if multiple, mismatch calls occur), * so that interrupts remain enabled in the interim. */ if (!dev->vblank_inmodeset[crtc]) { dev->vblank_inmodeset[crtc] = 0x1; if (drm_vblank_get(dev, crtc) == 0) dev->vblank_inmodeset[crtc] |= 0x2; } } void drm_vblank_post_modeset(struct drm_device *dev, int crtc) { if (dev->vblank_inmodeset[crtc]) { mtx_lock(&dev->vbl_lock); dev->vblank_disable_allowed = 1; mtx_unlock(&dev->vbl_lock); if (dev->vblank_inmodeset[crtc] & 0x2) drm_vblank_put(dev, crtc); dev->vblank_inmodeset[crtc] = 0; } } /** * drm_modeset_ctl - handle vblank event counter changes across mode switch * @DRM_IOCTL_ARGS: standard ioctl arguments * * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET * ioctls around modesetting so that any lost vblank events are accounted for. * * Generally the counter will reset across mode sets. 
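A mock of the two-bit vblank_inmodeset bookkeeping used by drm_vblank_pre_modeset()/drm_vblank_post_modeset() above: bit 0 records "modeset in progress", bit 1 records "we hold a vblank reference that post-modeset must drop". The get/put stubs are stand-ins, not the real refcounting, and serve only to show that duplicate pre-modeset calls cannot corrupt the count.

#include <stdio.h>

static int inmodeset;	/* per-crtc state, as in the code above */
static int refcount;	/* stand-in for dev->vblank_refcount */

static int vblank_get(void) { refcount++; return (0); }	/* pretend success */
static void vblank_put(void) { refcount--; }

static void
pre_modeset(void)
{
	if (!inmodeset) {
		inmodeset = 0x1;		/* modeset in progress */
		if (vblank_get() == 0)
			inmodeset |= 0x2;	/* we took a reference */
	}
}

static void
post_modeset(void)
{
	if (inmodeset) {
		if (inmodeset & 0x2)
			vblank_put();		/* drop it exactly once */
		inmodeset = 0;
	}
}

int
main(void)
{
	pre_modeset();
	pre_modeset();		/* duplicate call: count not corrupted */
	post_modeset();
	printf("refcount %d\n", refcount);	/* 0 */
	return (0);
}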
If interrupts are * enabled around this call, we don't have to do anything since the counter * will have already been incremented. */ int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; - int ret = 0; unsigned int crtc; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) - goto out; + return 0; crtc = modeset->crtc; - if (crtc >= dev->num_crtcs) { - ret = -EINVAL; - goto out; - } + if (crtc >= dev->num_crtcs) + return -EINVAL; switch (modeset->cmd) { case _DRM_PRE_MODESET: drm_vblank_pre_modeset(dev, crtc); break; case _DRM_POST_MODESET: drm_vblank_post_modeset(dev, crtc); break; default: - ret = -EINVAL; + return -EINVAL; break; } -out: - return ret; + return 0; } static void drm_vblank_event_destroy(struct drm_pending_event *e) { free(e, DRM_MEM_VBLANK); } static int drm_queue_vblank_event(struct drm_device *dev, int pipe, union drm_wait_vblank *vblwait, struct drm_file *file_priv) { struct drm_pending_vblank_event *e; struct timeval now; unsigned int seq; int ret; e = malloc(sizeof *e, DRM_MEM_VBLANK, M_WAITOK | M_ZERO); e->pipe = pipe; e->base.pid = curproc->p_pid; e->event.base.type = DRM_EVENT_VBLANK; e->event.base.length = sizeof e->event; e->event.user_data = vblwait->request.signal; e->base.event = &e->event.base; e->base.file_priv = file_priv; e->base.destroy = drm_vblank_event_destroy; mtx_lock(&dev->event_lock); if (file_priv->event_space < sizeof e->event) { ret = EBUSY; goto err_unlock; } file_priv->event_space -= sizeof e->event; seq = drm_vblank_count_and_time(dev, pipe, &now); if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1 << 23)) { vblwait->request.sequence = seq + 1; vblwait->reply.sequence = vblwait->request.sequence; } DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", vblwait->request.sequence, seq, pipe); CTR4(KTR_DRM, "vblank_event_queued %d %d rt %x %d", curproc->p_pid, pipe, vblwait->request.type, vblwait->request.sequence); e->event.sequence = vblwait->request.sequence; if ((seq - vblwait->request.sequence) <= (1 << 23)) { e->event.sequence = seq; e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, pipe); list_add_tail(&e->base.link, &e->base.file_priv->event_list); drm_event_wakeup(&e->base); vblwait->reply.sequence = seq; CTR3(KTR_DRM, "vblank_event_wakeup p1 %d %d %d", curproc->p_pid, pipe, vblwait->request.sequence); } else { /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); vblwait->reply.sequence = vblwait->request.sequence; } mtx_unlock(&dev->event_lock); return 0; err_unlock: mtx_unlock(&dev->event_lock); free(e, DRM_MEM_VBLANK); drm_vblank_put(dev, pipe); return ret; } /** * Wait for VBLANK. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param data user argument, pointing to a drm_wait_vblank structure. * \return zero on success or a negative number on failure. * * This function enables the vblank interrupt on the pipe requested, then * sleeps waiting for the requested sequence number to occur, and drops * the vblank interrupt refcount afterwards. (vblank irq disable follows that * after a timeout with no further vblank waits scheduled). 
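The event-queueing code above relies on unsigned modular arithmetic to decide whether a requested sequence number has already passed: (current - target) <= (1 << 23) stays correct across counter wrap as long as the two values are within 2^23 frames of each other. A minimal sketch of that test:

#include <stdint.h>
#include <stdio.h>

static int
vblank_passed(uint32_t current, uint32_t target)
{
	return ((current - target) <= (uint32_t)(1 << 23));
}

int
main(void)
{
	printf("%d\n", vblank_passed(10, 5));		/* 1: passed  */
	printf("%d\n", vblank_passed(5, 10));		/* 0: pending */
	printf("%d\n", vblank_passed(3, 0xfffffffb));	/* 1: wrapped */
	return (0);
}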
*/ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) { union drm_wait_vblank *vblwait = data; - int ret = 0; + int ret; unsigned int flags, seq, crtc, high_crtc; if (/*(!drm_dev_to_irq(dev)) || */(!dev->irq_enabled)) return (EINVAL); if (vblwait->request.type & _DRM_VBLANK_SIGNAL) return (EINVAL); if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | _DRM_VBLANK_HIGH_CRTC_MASK)) { DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | _DRM_VBLANK_HIGH_CRTC_MASK)); return (EINVAL); } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); if (high_crtc) crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT; else crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; if (crtc >= dev->num_crtcs) return (EINVAL); ret = drm_vblank_get(dev, crtc); if (ret) { DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); return (ret); } seq = drm_vblank_count(dev, crtc); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: vblwait->request.sequence += seq; vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; case _DRM_VBLANK_ABSOLUTE: break; default: ret = (EINVAL); goto done; } if (flags & _DRM_VBLANK_EVENT) { /* must hold on to the vblank ref until the event fires * drm_vblank_put will be called asynchronously */ return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); } if ((flags & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1<<23)) { vblwait->request.sequence = seq + 1; } dev->last_vblank_wait[crtc] = vblwait->request.sequence; mtx_lock(&dev->vblank_time_lock); while (((drm_vblank_count(dev, crtc) - vblwait->request.sequence) > (1 << 23)) && dev->irq_enabled) { /* * The wakeups from the drm_irq_uninstall() and * drm_vblank_off() may be lost there since vbl_lock * is not held. Then, the timeout will wake us; the 3 * seconds delay should not be a problem for * application when crtc is disabled or irq * uninstalled anyway. 
*/ ret = msleep(&dev->_vblank_count[crtc], &dev->vblank_time_lock, PCATCH, "drmvbl", 3 * hz); if (ret != 0) break; } mtx_unlock(&dev->vblank_time_lock); if (ret != EINTR) { struct timeval now; long reply_seq; reply_seq = drm_vblank_count_and_time(dev, crtc, &now); CTR5(KTR_DRM, "wait_vblank %d %d rt %x success %d %d", curproc->p_pid, crtc, vblwait->request.type, vblwait->request.sequence, reply_seq); vblwait->reply.sequence = reply_seq; vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; } else { CTR5(KTR_DRM, "wait_vblank %d %d rt %x error %d %d", curproc->p_pid, crtc, vblwait->request.type, ret, vblwait->request.sequence); } done: drm_vblank_put(dev, crtc); return ret; } void drm_handle_vblank_events(struct drm_device *dev, int crtc) { struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned int seq; seq = drm_vblank_count_and_time(dev, crtc, &now); CTR2(KTR_DRM, "drm_handle_vblank_events %d %d", seq, crtc); mtx_lock(&dev->event_lock); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != crtc) continue; if ((seq - e->event.sequence) > (1<<23)) continue; e->event.sequence = seq; e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, e->pipe); list_move_tail(&e->base.link, &e->base.file_priv->event_list); drm_event_wakeup(&e->base); CTR3(KTR_DRM, "vblank_event_wakeup p2 %d %d %d", e->base.pid, e->pipe, e->event.sequence); } mtx_unlock(&dev->event_lock); } /** * drm_handle_vblank - handle a vblank event * @dev: DRM device * @crtc: where this event occurred * * Drivers should call this routine in their vblank interrupt handlers to * update the vblank counter and send any signals that may be pending. */ bool drm_handle_vblank(struct drm_device *dev, int crtc) { u32 vblcount; int64_t diff_ns; struct timeval tvblank; if (!dev->num_crtcs) return false; /* Need timestamp lock to prevent concurrent execution with * vblank enable/disable, as this would cause inconsistent * or corrupted timestamps and vblank counts. */ mtx_lock(&dev->vblank_time_lock); /* Vblank irq handling disabled. Nothing to do. */ if (!dev->vblank_enabled[crtc]) { mtx_unlock(&dev->vblank_time_lock); return false; } /* Fetch corresponding timestamp for this vblank interval from * driver and store it in proper slot of timestamp ringbuffer. */ /* Get current timestamp and count. */ vblcount = atomic_read(&dev->_vblank_count[crtc]); drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); /* Compute time difference to timestamp of last vblank */ diff_ns = timeval_to_ns(&tvblank) - timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); /* Update vblank timestamp and count if at least * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds * difference between last stored timestamp and current * timestamp. A smaller difference means basically * identical timestamps. Happens if this vblank has * been already processed and this is a redundant call, * e.g., due to spurious vblank interrupts. We need to * ignore those for accounting. */ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { /* Store new timestamp in ringbuffer. */ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; /* Increment cooked vblank count. This also atomically commits * the timestamp computed above. */ atomic_inc(&dev->_vblank_count[crtc]); } else { DRM_DEBUG("crtc %d: Redundant vblirq ignored. 
diff_ns = %d\n", crtc, (int) diff_ns); } wakeup(&dev->_vblank_count[crtc]); drm_handle_vblank_events(dev, crtc); mtx_unlock(&dev->vblank_time_lock); return true; } Index: head/sys/dev/drm2/drm_memory.c =================================================================== --- head/sys/dev/drm2/drm_memory.c (revision 277486) +++ head/sys/dev/drm2/drm_memory.c (revision 277487) @@ -1,127 +1,135 @@ /*- *Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); /** @file drm_memory.c * Wrappers for kernel memory allocation routines, and MTRR management support. * * This file previously implemented a memory consumption tracking system using * the "area" argument for various different types of allocations, but that * has been stripped out for now. 
*/ #include MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures"); MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures"); MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures"); MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures"); MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures"); MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures"); MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures"); MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures"); MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures"); MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures"); MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures"); MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures"); MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures"); MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures"); MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures"); MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap", "DRM CTXBITMAP Data Structures"); MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures"); MALLOC_DEFINE(DRM_MEM_DRAWABLE, "drm_drawable", "DRM DRAWABLE Data Structures"); MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures"); MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures"); MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures"); void drm_mem_init(void) { } void drm_mem_uninit(void) { } void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map) { return pmap_mapdev_attr(map->offset, map->size, VM_MEMATTR_WRITE_COMBINING); } void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map) { return pmap_mapdev(map->offset, map->size); } void drm_ioremapfree(drm_local_map_t *map) { pmap_unmapdev((vm_offset_t) map->virtual, map->size); } int drm_mtrr_add(unsigned long offset, size_t size, int flags) { int act; struct mem_range_desc mrdesc; mrdesc.mr_base = offset; mrdesc.mr_len = size; mrdesc.mr_flags = flags; act = MEMRANGE_SET_UPDATE; strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner)); return mem_range_attr_set(&mrdesc, &act); } int drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags) { int act; struct mem_range_desc mrdesc; mrdesc.mr_base = offset; mrdesc.mr_len = size; mrdesc.mr_flags = flags; act = MEMRANGE_SET_REMOVE; strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner)); return mem_range_attr_set(&mrdesc, &act); } void drm_clflush_pages(vm_page_t *pages, unsigned long num_pages) { pmap_invalidate_cache_pages(pages, num_pages); } + +void +drm_clflush_virt_range(char *addr, unsigned long length) +{ + + pmap_invalidate_cache_range((vm_offset_t)addr, + (vm_offset_t)addr + length, TRUE); +} Index: head/sys/dev/drm2/drm_mode.h =================================================================== --- head/sys/dev/drm2/drm_mode.h (revision 277486) +++ head/sys/dev/drm2/drm_mode.h (revision 277487) @@ -1,444 +1,460 @@ /* * Copyright (c) 2007 Dave Airlie * Copyright (c) 2007 Jakob Bornecrantz * Copyright (c) 2008 Red Hat Inc. 
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA * Copyright (c) 2007-2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * $FreeBSD$ */ #ifndef _DRM_MODE_H #define _DRM_MODE_H #define DRM_DISPLAY_INFO_LEN 32 #define DRM_CONNECTOR_NAME_LEN 32 #define DRM_DISPLAY_MODE_LEN 32 #define DRM_PROP_NAME_LEN 32 #define DRM_MODE_TYPE_BUILTIN (1<<0) #define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) #define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) #define DRM_MODE_TYPE_PREFERRED (1<<3) #define DRM_MODE_TYPE_DEFAULT (1<<4) #define DRM_MODE_TYPE_USERDEF (1<<5) #define DRM_MODE_TYPE_DRIVER (1<<6) /* Video mode flags */ /* bit compatible with the xorg definitions. */ #define DRM_MODE_FLAG_PHSYNC (1<<0) #define DRM_MODE_FLAG_NHSYNC (1<<1) #define DRM_MODE_FLAG_PVSYNC (1<<2) #define DRM_MODE_FLAG_NVSYNC (1<<3) #define DRM_MODE_FLAG_INTERLACE (1<<4) #define DRM_MODE_FLAG_DBLSCAN (1<<5) #define DRM_MODE_FLAG_CSYNC (1<<6) #define DRM_MODE_FLAG_PCSYNC (1<<7) #define DRM_MODE_FLAG_NCSYNC (1<<8) #define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ #define DRM_MODE_FLAG_BCAST (1<<10) #define DRM_MODE_FLAG_PIXMUX (1<<11) #define DRM_MODE_FLAG_DBLCLK (1<<12) #define DRM_MODE_FLAG_CLKDIV2 (1<<13) /* DPMS flags */ /* bit compatible with the xorg definitions. 
*/ #define DRM_MODE_DPMS_ON 0 #define DRM_MODE_DPMS_STANDBY 1 #define DRM_MODE_DPMS_SUSPEND 2 #define DRM_MODE_DPMS_OFF 3 /* Scaling mode options */ #define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or software can still scale) */ #define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */ #define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ #define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ /* Dithering mode options */ #define DRM_MODE_DITHERING_OFF 0 #define DRM_MODE_DITHERING_ON 1 #define DRM_MODE_DITHERING_AUTO 2 /* Dirty info options */ #define DRM_MODE_DIRTY_OFF 0 #define DRM_MODE_DIRTY_ON 1 #define DRM_MODE_DIRTY_ANNOTATE 2 struct drm_mode_modeinfo { uint32_t clock; uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew; uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan; uint32_t vrefresh; uint32_t flags; uint32_t type; char name[DRM_DISPLAY_MODE_LEN]; }; struct drm_mode_card_res { uint64_t fb_id_ptr; uint64_t crtc_id_ptr; uint64_t connector_id_ptr; uint64_t encoder_id_ptr; uint32_t count_fbs; uint32_t count_crtcs; uint32_t count_connectors; uint32_t count_encoders; uint32_t min_width, max_width; uint32_t min_height, max_height; }; struct drm_mode_crtc { uint64_t set_connectors_ptr; uint32_t count_connectors; uint32_t crtc_id; /**< Id */ uint32_t fb_id; /**< Id of framebuffer */ uint32_t x, y; /**< Position on the frameuffer */ uint32_t gamma_size; uint32_t mode_valid; struct drm_mode_modeinfo mode; }; #define DRM_MODE_PRESENT_TOP_FIELD (1<<0) #define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1) /* Planes blend with or override other bits on the CRTC */ struct drm_mode_set_plane { uint32_t plane_id; uint32_t crtc_id; uint32_t fb_id; /* fb object contains surface format type */ uint32_t flags; /* see above flags */ /* Signed dest location allows it to be partially off screen */ int32_t crtc_x, crtc_y; uint32_t crtc_w, crtc_h; /* Source values are 16.16 fixed point */ uint32_t src_x, src_y; uint32_t src_h, src_w; }; struct drm_mode_get_plane { uint32_t plane_id; uint32_t crtc_id; uint32_t fb_id; uint32_t possible_crtcs; uint32_t gamma_size; uint32_t count_format_types; uint64_t format_type_ptr; }; struct drm_mode_get_plane_res { uint64_t plane_id_ptr; uint32_t count_planes; }; #define DRM_MODE_ENCODER_NONE 0 #define DRM_MODE_ENCODER_DAC 1 #define DRM_MODE_ENCODER_TMDS 2 #define DRM_MODE_ENCODER_LVDS 3 #define DRM_MODE_ENCODER_TVDAC 4 struct drm_mode_get_encoder { uint32_t encoder_id; uint32_t encoder_type; uint32_t crtc_id; /**< Id of crtc */ uint32_t possible_crtcs; uint32_t possible_clones; }; /* This is for connectors with multiple signal types. */ /* Try to match DRM_MODE_CONNECTOR_X as closely as possible. 
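A small sketch of how the drm_mode_modeinfo fields relate: the nominal refresh rate follows from clock (in kHz), htotal and vtotal. The example timings are typical 1080p60 numbers, used only for illustration.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t clock = 148500;		/* kHz, as in drm_mode_modeinfo */
	uint16_t htotal = 2200, vtotal = 1125;
	uint32_t vrefresh = (uint64_t)clock * 1000 / (htotal * vtotal);

	printf("%u Hz\n", vrefresh);		/* 60 */
	return (0);
}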
*/ #define DRM_MODE_SUBCONNECTOR_Automatic 0 #define DRM_MODE_SUBCONNECTOR_Unknown 0 #define DRM_MODE_SUBCONNECTOR_DVID 3 #define DRM_MODE_SUBCONNECTOR_DVIA 4 #define DRM_MODE_SUBCONNECTOR_Composite 5 #define DRM_MODE_SUBCONNECTOR_SVIDEO 6 #define DRM_MODE_SUBCONNECTOR_Component 8 #define DRM_MODE_SUBCONNECTOR_SCART 9 #define DRM_MODE_CONNECTOR_Unknown 0 #define DRM_MODE_CONNECTOR_VGA 1 #define DRM_MODE_CONNECTOR_DVII 2 #define DRM_MODE_CONNECTOR_DVID 3 #define DRM_MODE_CONNECTOR_DVIA 4 #define DRM_MODE_CONNECTOR_Composite 5 #define DRM_MODE_CONNECTOR_SVIDEO 6 #define DRM_MODE_CONNECTOR_LVDS 7 #define DRM_MODE_CONNECTOR_Component 8 #define DRM_MODE_CONNECTOR_9PinDIN 9 #define DRM_MODE_CONNECTOR_DisplayPort 10 #define DRM_MODE_CONNECTOR_HDMIA 11 #define DRM_MODE_CONNECTOR_HDMIB 12 #define DRM_MODE_CONNECTOR_TV 13 #define DRM_MODE_CONNECTOR_eDP 14 struct drm_mode_get_connector { uint64_t encoders_ptr; uint64_t modes_ptr; uint64_t props_ptr; uint64_t prop_values_ptr; uint32_t count_modes; uint32_t count_props; uint32_t count_encoders; uint32_t encoder_id; /**< Current Encoder */ uint32_t connector_id; /**< Id */ uint32_t connector_type; uint32_t connector_type_id; uint32_t connection; uint32_t mm_width, mm_height; /**< HxW in millimeters */ uint32_t subpixel; }; #define DRM_MODE_PROP_PENDING (1<<0) #define DRM_MODE_PROP_RANGE (1<<1) #define DRM_MODE_PROP_IMMUTABLE (1<<2) #define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ #define DRM_MODE_PROP_BLOB (1<<4) +#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ struct drm_mode_property_enum { uint64_t value; char name[DRM_PROP_NAME_LEN]; }; struct drm_mode_get_property { uint64_t values_ptr; /* values and blob lengths */ uint64_t enum_blob_ptr; /* enum and blob id ptrs */ uint32_t prop_id; uint32_t flags; char name[DRM_PROP_NAME_LEN]; uint32_t count_values; uint32_t count_enum_blobs; }; struct drm_mode_connector_set_property { uint64_t value; uint32_t prop_id; uint32_t connector_id; +}; + +struct drm_mode_obj_get_properties { + uint64_t props_ptr; + uint64_t prop_values_ptr; + uint32_t count_props; + uint32_t obj_id; + uint32_t obj_type; +}; + +struct drm_mode_obj_set_property { + uint64_t value; + uint32_t prop_id; + uint32_t obj_id; + uint32_t obj_type; }; struct drm_mode_get_blob { uint32_t blob_id; uint32_t length; uint64_t data; }; struct drm_mode_fb_cmd { uint32_t fb_id; uint32_t width, height; uint32_t pitch; uint32_t bpp; uint32_t depth; /* driver specific handle */ uint32_t handle; }; #define DRM_MODE_FB_INTERLACED (1<<0 /* for interlaced framebuffers */ struct drm_mode_fb_cmd2 { uint32_t fb_id; uint32_t width, height; uint32_t pixel_format; /* fourcc code from drm_fourcc.h */ uint32_t flags; /* see above flags */ /* * In case of planar formats, this ioctl allows up to 4 * buffer objects with offets and pitches per plane. * The pitch and offset order is dictated by the fourcc, * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as: * * YUV 4:2:0 image with a plane of 8 bit Y samples * followed by an interleaved U/V plane containing * 8 bit 2x2 subsampled colour difference samples. * * So it would consist of Y as offset[0] and UV as * offeset[1]. Note that offset[0] will generally * be 0. 
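A standalone sketch of the NV12 plane layout described in the comment above, computing the pitches[]/offsets[] a drm_mode_fb_cmd2 caller would pass when both planes live in one buffer object: a full-size 8-bit Y plane followed by a 2x2-subsampled interleaved UV plane. Dimensions are arbitrary example values.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t width = 1920, height = 1080;
	uint32_t pitches[4] = { 0 }, offsets[4] = { 0 };

	pitches[0] = width;			/* Y: one byte per pixel */
	offsets[0] = 0;
	pitches[1] = width;			/* interleaved UV pairs, half the rows */
	offsets[1] = pitches[0] * height;	/* UV plane starts right after Y */

	printf("Y at %u (pitch %u), UV at %u (pitch %u)\n",
	    offsets[0], pitches[0], offsets[1], pitches[1]);
	return (0);
}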
*/ uint32_t handles[4]; uint32_t pitches[4]; /* pitch for each plane */ uint32_t offsets[4]; /* offset of each plane */ }; #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 #define DRM_MODE_FB_DIRTY_FLAGS 0x03 #define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 /* * Mark a region of a framebuffer as dirty. * * Some hardware does not automatically update display contents * as a hardware or software draw to a framebuffer. This ioctl * allows userspace to tell the kernel and the hardware what * regions of the framebuffer have changed. * * The kernel or hardware is free to update more then just the * region specified by the clip rects. The kernel or hardware * may also delay and/or coalesce several calls to dirty into a * single update. * * Userspace may annotate the updates, the annotates are a * promise made by the caller that the change is either a copy * of pixels or a fill of a single color in the region specified. * * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then * the number of updated regions are half of num_clips given, * where the clip rects are paired in src and dst. The width and * height of each one of the pairs must match. * * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller * promises that the region specified of the clip rects is filled * completely with a single color as given in the color argument. */ struct drm_mode_fb_dirty_cmd { uint32_t fb_id; uint32_t flags; uint32_t color; uint32_t num_clips; uint64_t clips_ptr; }; struct drm_mode_mode_cmd { uint32_t connector_id; struct drm_mode_modeinfo mode; }; #define DRM_MODE_CURSOR_BO (1<<0) #define DRM_MODE_CURSOR_MOVE (1<<1) /* * depending on the value in flags diffrent members are used. * * CURSOR_BO uses * crtc * width * height * handle - if 0 turns the cursor of * * CURSOR_MOVE uses * crtc * x * y */ struct drm_mode_cursor { uint32_t flags; uint32_t crtc_id; int32_t x; int32_t y; uint32_t width; uint32_t height; /* driver specific handle */ uint32_t handle; }; struct drm_mode_crtc_lut { uint32_t crtc_id; uint32_t gamma_size; /* pointers to arrays */ uint64_t red; uint64_t green; uint64_t blue; }; #define DRM_MODE_PAGE_FLIP_EVENT 0x01 #define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT /* * Request a page flip on the specified crtc. * * This ioctl will ask KMS to schedule a page flip for the specified * crtc. Once any pending rendering targeting the specified fb (as of * ioctl time) has completed, the crtc will be reprogrammed to display * that fb after the next vertical refresh. The ioctl returns * immediately, but subsequent rendering to the current fb will block * in the execbuffer ioctl until the page flip happens. If a page * flip is already pending as the ioctl is called, EBUSY will be * returned. * * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will * request that drm sends back a vblank event (see drm.h: struct * drm_event_vblank) when the page flip is done. The user_data field * passed in with this ioctl will be returned as the user_data field * in the vblank event struct. * * The reserved field must be zero until we figure out something * clever to use it for. 
*/ struct drm_mode_crtc_page_flip { uint32_t crtc_id; uint32_t fb_id; uint32_t flags; uint32_t reserved; uint64_t user_data; }; /* create a dumb scanout buffer */ struct drm_mode_create_dumb { uint32_t height; uint32_t width; uint32_t bpp; uint32_t flags; /* handle, pitch, size will be returned */ uint32_t handle; uint32_t pitch; uint64_t size; }; /* set up for mmap of a dumb scanout buffer */ struct drm_mode_map_dumb { /** Handle for the object being mapped. */ uint32_t handle; uint32_t pad; /** * Fake offset to use for subsequent mmap call * * This is a fixed-size type for 32/64 compatibility. */ uint64_t offset; }; struct drm_mode_destroy_dumb { uint32_t handle; }; #endif Index: head/sys/dev/drm2/drm_pciids.h =================================================================== --- head/sys/dev/drm2/drm_pciids.h (revision 277486) +++ head/sys/dev/drm2/drm_pciids.h (revision 277487) @@ -1,988 +1,995 @@ /* * $FreeBSD$ */ /* * Generated by gen-drm_pciids from: * o previous FreeBSD's drm_pciids.h * o Linux' drm_pciids.h * o the PCI ID repository (http://pciids.sourceforge.net/) * * See tools/tools/drm/gen-drm_pciids. */ #define ffb_PCI_IDS \ {0, 0, 0, NULL} #define gamma_PCI_IDS \ {0x3D3D, 0x0008, 0, "3DLabs GLINT Gamma G1"}, \ {0, 0, 0, NULL} #define i810_PCI_IDS \ {0x8086, 0x1132, 0, "Intel i815 GMCH"}, \ {0x8086, 0x7121, 0, "Intel i810 GMCH"}, \ {0x8086, 0x7123, 0, "Intel i810-DC100 GMCH"}, \ {0x8086, 0x7125, 0, "Intel i810E GMCH"}, \ {0, 0, 0, NULL} #define i830_PCI_IDS \ {0x8086, 0x2562, 0, "Intel i845G GMCH"}, \ {0x8086, 0x2572, 0, "Intel i865G GMCH"}, \ {0x8086, 0x3577, 0, "Intel i830M GMCH"}, \ {0x8086, 0x3582, 0, "Intel i852GM/i855GM GMCH"}, \ {0, 0, 0, NULL} #define i915_PCI_IDS \ {0x8086, 0x0042, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \ {0x8086, 0x0046, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \ {0x8086, 0x0102, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \ {0x8086, 0x0106, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \ {0x8086, 0x010A, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \ {0x8086, 0x0112, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \ {0x8086, 0x0116, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \ {0x8086, 0x0122, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \ {0x8086, 0x0126, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \ {0x8086, 0x0152, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \ {0x8086, 0x0156, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \ {0x8086, 0x015A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \ {0x8086, 0x0162, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \ {0x8086, 0x0166, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \ {0x8086, 0x016A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \ + {0x8086, 0x0402, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \ + {0x8086, 0x0412, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \ + {0x8086, 0x040a, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \ + {0x8086, 0x041a, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \ + {0x8086, 0x0406, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \ + {0x8086, 0x0416, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \ + {0x8086, 0x0c16, CHIP_I9XX|CHIP_I915, "Intel Haswell (SDV)"}, \ {0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \ {0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \ {0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \ {0x8086, 0x258A, CHIP_I9XX|CHIP_I915, "Intel E7221 (i915)"}, \ {0x8086, 0x2592, CHIP_I9XX|CHIP_I915, "Intel i915GM"}, \ {0x8086, 0x2772, CHIP_I9XX|CHIP_I915, "Intel i945G"}, \ {0x8086, 0x27A2, CHIP_I9XX|CHIP_I915, "Intel i945GM"}, \ {0x8086, 0x27AE, CHIP_I9XX|CHIP_I915, "Intel i945GME"}, \ 
{0x8086, 0x2972, CHIP_I9XX|CHIP_I965, "Intel i946GZ"}, \ {0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \ {0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \ {0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \ {0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \ {0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \ {0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \ {0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \ {0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \ {0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Mobile Intel® GM45 Express Chipset"}, \ {0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel Eaglelake"}, \ {0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45/Q43"}, \ {0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45/G43"}, \ {0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \ {0x8086, 0x2E42, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \ {0x8086, 0x2E92, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \ {0x8086, 0x3577, CHIP_I8XX, "Intel i830M GMCH"}, \ {0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \ {0x8086, 0x358E, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \ {0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \ {0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \ {0, 0, 0, NULL} #define imagine_PCI_IDS \ {0x105D, 0x2309, IMAGINE_128, "Imagine 128"}, \ {0x105D, 0x2339, IMAGINE_128_2, "Imagine 128-II"}, \ {0x105D, 0x493D, IMAGINE_T2R, "Ticket to Ride"}, \ {0x105D, 0x5348, IMAGINE_REV4, "Revolution IV"}, \ {0, 0, 0, NULL} #define mach64_PCI_IDS \ {0x1002, 0x4742, 0, "3D Rage Pro AGP 1X/2X"}, \ {0x1002, 0x4744, 0, "3D Rage Pro AGP 1X"}, \ {0x1002, 0x4749, 0, "3D Rage Pro"}, \ {0x1002, 0x474C, 0, "Rage XC"}, \ {0x1002, 0x474D, 0, "Rage XL AGP 2X"}, \ {0x1002, 0x474E, 0, "Rage XC AGP"}, \ {0x1002, 0x474F, 0, "Rage XL"}, \ {0x1002, 0x4750, 0, "3D Rage Pro 215GP"}, \ {0x1002, 0x4751, 0, "3D Rage Pro 215GQ"}, \ {0x1002, 0x4752, 0, "Rage XL"}, \ {0x1002, 0x4753, 0, "Rage XC"}, \ {0x1002, 0x4C42, 0, "3D Rage LT Pro AGP-133"}, \ {0x1002, 0x4C44, 0, "3D Rage LT Pro AGP-66"}, \ {0x1002, 0x4C49, 0, "3D Rage LT Pro"}, \ {0x1002, 0x4C4D, 0, "Rage Mobility P/M AGP 2X"}, \ {0x1002, 0x4C4E, 0, "Rage Mobility L AGP 2X"}, \ {0x1002, 0x4C50, 0, "3D Rage LT Pro"}, \ {0x1002, 0x4C51, 0, "3D Rage LT Pro"}, \ {0x1002, 0x4C52, 0, "Rage Mobility P/M"}, \ {0x1002, 0x4C53, 0, "Rage Mobility L"}, \ {0, 0, 0, NULL} #define mga_PCI_IDS \ {0x102B, 0x0520, MGA_CARD_TYPE_G200, "Matrox G200 (PCI)"}, \ {0x102B, 0x0521, MGA_CARD_TYPE_G200, "Matrox G200 (AGP)"}, \ {0x102B, 0x0525, MGA_CARD_TYPE_G400, "Matrox G400/G450 (AGP)"}, \ {0x102B, 0x2527, MGA_CARD_TYPE_G550, "Matrox G550 (AGP)"}, \ {0, 0, 0, NULL} #define nv_PCI_IDS \ {0x10DE, 0x0020, NV04, "NVidia RIVA TNT"}, \ {0x10DE, 0x0028, NV04, "NVidia RIVA TNT2"}, \ {0x10DE, 0x0029, NV04, "NVidia RIVA TNT2 Ultra"}, \ {0x10DE, 0x002A, NV04, "NVidia Unknown TNT2"}, \ {0x10DE, 0x002C, NV04, "NVidia Vanta"}, \ {0x10DE, 0x002D, NV04, "NVidia RIVA TNT2 Model 64"}, \ {0x10DE, 0x0040, NV40, "NVidia GeForce 6800 Ultra"}, \ {0x10DE, 0x0041, NV40, "NVidia GeForce 6800"}, \ {0x10DE, 0x0042, NV40, "NVidia GeForce 6800 LE"}, \ {0x10DE, 0x0043, NV40, "NVidia 0x0043"}, \ {0x10DE, 0x0045, NV40, "NVidia GeForce 6800 GT"}, \ {0x10DE, 0x0046, NV40, "NVidia GeForce 6800 GT"}, \ {0x10DE, 0x0049, NV40, "NVidia 0x0049"}, \ {0x10DE, 0x004E, NV40, "NVidia Quadro FX 4000"}, \ {0x10DE, 0x0090, NV40, "NVidia 0x0090"}, \ {0x10DE, 0x0091, NV40, "NVidia GeForce 7800 GTX"}, \ {0x10DE, 0x0092, NV40, "NVidia 0x0092"}, \ {0x10DE, 0x0093, NV40, "NVidia 0x0093"}, \ {0x10DE, 0x0094, 
NV40, "NVidia 0x0094"}, \ {0x10DE, 0x0098, NV40, "NVidia 0x0098"}, \ {0x10DE, 0x0099, NV40, "NVidia GeForce Go 7800 GTX"}, \ {0x10DE, 0x009C, NV40, "NVidia 0x009C"}, \ {0x10DE, 0x009D, NV40, "NVidia Quadro FX 4500"}, \ {0x10DE, 0x009E, NV40, "NVidia 0x009E"}, \ {0x10DE, 0x00A0, NV04, "NVidia Aladdin TNT2"}, \ {0x10DE, 0x00C0, NV40, "NVidia 0x00C0"}, \ {0x10DE, 0x00C1, NV40, "NVidia GeForce 6800"}, \ {0x10DE, 0x00C2, NV40, "NVidia GeForce 6800 LE"}, \ {0x10DE, 0x00C8, NV40, "NVidia GeForce Go 6800"}, \ {0x10DE, 0x00C9, NV40, "NVidia GeForce Go 6800 Ultra"}, \ {0x10DE, 0x00CC, NV40, "NVidia Quadro FX Go1400"}, \ {0x10DE, 0x00CD, NV40, "NVidia Quadro FX 3450/4000 SDI"}, \ {0x10DE, 0x00CE, NV40, "NVidia Quadro FX 1400"}, \ {0x10DE, 0x00F0, NV40, "Nvidia GeForce 6600 GT"}, \ {0x10DE, 0x00F1, NV40, "Nvidia GeForce 6600 GT"}, \ {0x10DE, 0x0100, NV10, "NVidia GeForce 256"}, \ {0x10DE, 0x0101, NV10, "NVidia GeForce DDR"}, \ {0x10DE, 0x0103, NV10, "NVidia Quadro"}, \ {0x10DE, 0x0110, NV10, "NVidia GeForce2 MX/MX 400"}, \ {0x10DE, 0x0111, NV10, "NVidia GeForce2 MX 100/200"}, \ {0x10DE, 0x0112, NV10, "NVidia GeForce2 Go"}, \ {0x10DE, 0x0113, NV10, "NVidia Quadro2 MXR/EX/Go"}, \ {0x10DE, 0x0140, NV40, "NVidia GeForce 6600 GT"}, \ {0x10DE, 0x0141, NV40, "NVidia GeForce 6600"}, \ {0x10DE, 0x0142, NV40, "NVidia GeForce 6600 LE"}, \ {0x10DE, 0x0143, NV40, "NVidia 0x0143"}, \ {0x10DE, 0x0144, NV40, "NVidia GeForce Go 6600"}, \ {0x10DE, 0x0145, NV40, "NVidia GeForce 6610 XL"}, \ {0x10DE, 0x0146, NV40, "NVidia GeForce Go 6600 TE/6200 TE"}, \ {0x10DE, 0x0147, NV40, "NVidia GeForce 6700 XL"}, \ {0x10DE, 0x0148, NV40, "NVidia GeForce Go 6600"}, \ {0x10DE, 0x0149, NV40, "NVidia GeForce Go 6600 GT"}, \ {0x10DE, 0x014B, NV40, "NVidia 0x014B"}, \ {0x10DE, 0x014C, NV40, "NVidia 0x014C"}, \ {0x10DE, 0x014D, NV40, "NVidia 0x014D"}, \ {0x10DE, 0x014E, NV40, "NVidia Quadro FX 540"}, \ {0x10DE, 0x014F, NV40, "NVidia GeForce 6200"}, \ {0x10DE, 0x0150, NV10, "NVidia GeForce2 GTS"}, \ {0x10DE, 0x0151, NV10, "NVidia GeForce2 Ti"}, \ {0x10DE, 0x0152, NV10, "NVidia GeForce2 Ultra"}, \ {0x10DE, 0x0153, NV10, "NVidia Quadro2 Pro"}, \ {0x10DE, 0x0160, NV40, "NVidia 0x0160"}, \ {0x10DE, 0x0161, NV40, "NVidia GeForce 6200 TurboCache(TM)"}, \ {0x10DE, 0x0162, NV40, "NVidia GeForce 6200SE TurboCache(TM)"}, \ {0x10DE, 0x0163, NV40, "NVidia 0x0163"}, \ {0x10DE, 0x0164, NV40, "NVidia GeForce Go 6200"}, \ {0x10DE, 0x0165, NV40, "NVidia Quadro NVS 285"}, \ {0x10DE, 0x0166, NV40, "NVidia GeForce Go 6400"}, \ {0x10DE, 0x0167, NV40, "NVidia GeForce Go 6200"}, \ {0x10DE, 0x0168, NV40, "NVidia GeForce Go 6400"}, \ {0x10DE, 0x0169, NV40, "NVidia 0x0169"}, \ {0x10DE, 0x016B, NV40, "NVidia 0x016B"}, \ {0x10DE, 0x016C, NV40, "NVidia 0x016C"}, \ {0x10DE, 0x016D, NV40, "NVidia 0x016D"}, \ {0x10DE, 0x016E, NV40, "NVidia 0x016E"}, \ {0x10DE, 0x0170, NV10, "NVidia GeForce4 MX 460"}, \ {0x10DE, 0x0171, NV10, "NVidia GeForce4 MX 440"}, \ {0x10DE, 0x0172, NV10, "NVidia GeForce4 MX 420"}, \ {0x10DE, 0x0173, NV10, "NVidia GeForce4 MX 440-SE"}, \ {0x10DE, 0x0174, NV10, "NVidia GeForce4 440 Go"}, \ {0x10DE, 0x0175, NV10, "NVidia GeForce4 420 Go"}, \ {0x10DE, 0x0176, NV10, "NVidia GeForce4 420 Go 32M"}, \ {0x10DE, 0x0177, NV10, "NVidia GeForce4 460 Go"}, \ {0x10DE, 0x0178, NV10, "NVidia Quadro4 550 XGL"}, \ {0x10DE, 0x0179, NV10, "NVidia GeForce4"}, \ {0x10DE, 0x017A, NV10, "NVidia Quadro4 NVS"}, \ {0x10DE, 0x017C, NV10, "NVidia Quadro4 500 GoGL"}, \ {0x10DE, 0x017D, NV10, "NVidia GeForce4 410 Go 16M"}, \ {0x10DE, 0x0181, NV10, "NVidia GeForce4 MX 440 with 
AGP8X"}, \ {0x10DE, 0x0182, NV10, "NVidia GeForce4 MX 440SE with AGP8X"}, \ {0x10DE, 0x0183, NV10, "NVidia GeForce4 MX 420 with AGP8X"}, \ {0x10DE, 0x0185, NV10, "NVidia GeForce4 MX 4000"}, \ {0x10DE, 0x0186, NV10, "NVidia GeForce4 448 Go"}, \ {0x10DE, 0x0187, NV10, "NVidia GeForce4 488 Go"}, \ {0x10DE, 0x0188, NV10, "NVidia Quadro4 580 XGL"}, \ {0x10DE, 0x0189, NV10, "NVidia GeForce4 MX with AGP8X (Mac)"}, \ {0x10DE, 0x018A, NV10, "NVidia Quadro4 280 NVS"}, \ {0x10DE, 0x018B, NV10, "NVidia Quadro4 380 XGL"}, \ {0x10DE, 0x018C, NV10, "NVidia Quadro NVS 50 PCI"}, \ {0x10DE, 0x018D, NV10, "NVidia GeForce4 448 Go"}, \ {0x10DE, 0x01A0, NV10, "NVidia GeForce2 Integrated GPU"}, \ {0x10DE, 0x01F0, NV10, "NVidia GeForce4 MX Integrated GPU"}, \ {0x10DE, 0x0200, NV20, "NVidia GeForce3"}, \ {0x10DE, 0x0201, NV20, "NVidia GeForce3 Ti 200"}, \ {0x10DE, 0x0202, NV20, "NVidia GeForce3 Ti 500"}, \ {0x10DE, 0x0203, NV20, "NVidia Quadro DCC"}, \ {0x10DE, 0x0210, NV40, "NVidia 0x0210"}, \ {0x10DE, 0x0211, NV40, "NVidia GeForce 6800"}, \ {0x10DE, 0x0212, NV40, "NVidia GeForce 6800 LE"}, \ {0x10DE, 0x0215, NV40, "NVidia GeForce 6800 GT"}, \ {0x10DE, 0x0220, NV40, "NVidia 0x0220"}, \ {0x10DE, 0x0221, NV40, "NVidia GeForce 6200"}, \ {0x10DE, 0x0222, NV40, "NVidia 0x0222"}, \ {0x10DE, 0x0228, NV40, "NVidia 0x0228"}, \ {0x10DE, 0x0250, NV20, "NVidia GeForce4 Ti 4600"}, \ {0x10DE, 0x0251, NV20, "NVidia GeForce4 Ti 4400"}, \ {0x10DE, 0x0252, NV20, "NVidia 0x0252"}, \ {0x10DE, 0x0253, NV20, "NVidia GeForce4 Ti 4200"}, \ {0x10DE, 0x0258, NV20, "NVidia Quadro4 900 XGL"}, \ {0x10DE, 0x0259, NV20, "NVidia Quadro4 750 XGL"}, \ {0x10DE, 0x025B, NV20, "NVidia Quadro4 700 XGL"}, \ {0x10DE, 0x0280, NV20, "NVidia GeForce4 Ti 4800"}, \ {0x10DE, 0x0281, NV20, "NVidia GeForce4 Ti 4200 with AGP8X"}, \ {0x10DE, 0x0282, NV20, "NVidia GeForce4 Ti 4800 SE"}, \ {0x10DE, 0x0286, NV20, "NVidia GeForce4 4200 Go"}, \ {0x10DE, 0x0288, NV20, "NVidia Quadro4 980 XGL"}, \ {0x10DE, 0x0289, NV20, "NVidia Quadro4 780 XGL"}, \ {0x10DE, 0x028C, NV20, "NVidia Quadro4 700 GoGL"}, \ {0x10DE, 0x0301, NV30, "NVidia GeForce FX 5800 Ultra"}, \ {0x10DE, 0x0302, NV30, "NVidia GeForce FX 5800"}, \ {0x10DE, 0x0308, NV30, "NVidia Quadro FX 2000"}, \ {0x10DE, 0x0309, NV30, "NVidia Quadro FX 1000"}, \ {0x10DE, 0x0311, NV30, "NVidia GeForce FX 5600 Ultra"}, \ {0x10DE, 0x0312, NV30, "NVidia GeForce FX 5600"}, \ {0x10DE, 0x0313, NV30, "NVidia 0x0313"}, \ {0x10DE, 0x0314, NV30, "NVidia GeForce FX 5600SE"}, \ {0x10DE, 0x0316, NV30, "NVidia 0x0316"}, \ {0x10DE, 0x0317, NV30, "NVidia 0x0317"}, \ {0x10DE, 0x031A, NV30, "NVidia GeForce FX Go5600"}, \ {0x10DE, 0x031B, NV30, "NVidia GeForce FX Go5650"}, \ {0x10DE, 0x031C, NV30, "NVidia Quadro FX Go700"}, \ {0x10DE, 0x031D, NV30, "NVidia 0x031D"}, \ {0x10DE, 0x031E, NV30, "NVidia 0x031E"}, \ {0x10DE, 0x031F, NV30, "NVidia 0x031F"}, \ {0x10DE, 0x0320, NV30, "NVidia GeForce FX 5200"}, \ {0x10DE, 0x0321, NV30, "NVidia GeForce FX 5200 Ultra"}, \ {0x10DE, 0x0322, NV30, "NVidia GeForce FX 5200"}, \ {0x10DE, 0x0323, NV30, "NVidia GeForce FX 5200SE"}, \ {0x10DE, 0x0324, NV30, "NVidia GeForce FX Go5200"}, \ {0x10DE, 0x0325, NV30, "NVidia GeForce FX Go5250"}, \ {0x10DE, 0x0326, NV30, "NVidia GeForce FX 5500"}, \ {0x10DE, 0x0327, NV30, "NVidia GeForce FX 5100"}, \ {0x10DE, 0x0328, NV30, "NVidia GeForce FX Go5200 32M/64M"}, \ {0x10DE, 0x0329, NV30, "NVidia GeForce FX 5200 (Mac)"}, \ {0x10DE, 0x032A, NV30, "NVidia Quadro NVS 280 PCI"}, \ {0x10DE, 0x032B, NV30, "NVidia Quadro FX 500/600 PCI"}, \ {0x10DE, 0x032C, NV30, "NVidia GeForce FX 
Go53xx Series"}, \ {0x10DE, 0x032D, NV30, "NVidia GeForce FX Go5100"}, \ {0x10DE, 0x032F, NV30, "NVidia 0x032F"}, \ {0x10DE, 0x0330, NV30, "NVidia GeForce FX 5900 Ultra"}, \ {0x10DE, 0x0331, NV30, "NVidia GeForce FX 5900"}, \ {0x10DE, 0x0332, NV30, "NVidia GeForce FX 5900XT"}, \ {0x10DE, 0x0333, NV30, "NVidia GeForce FX 5950 Ultra"}, \ {0x10DE, 0x0334, NV30, "NVidia GeForce FX 5900ZT"}, \ {0x10DE, 0x0338, NV30, "NVidia Quadro FX 3000"}, \ {0x10DE, 0x033F, NV30, "NVidia Quadro FX 700"}, \ {0x10DE, 0x0341, NV30, "NVidia GeForce FX 5700 Ultra"}, \ {0x10DE, 0x0342, NV30, "NVidia GeForce FX 5700"}, \ {0x10DE, 0x0343, NV30, "NVidia GeForce FX 5700LE"}, \ {0x10DE, 0x0344, NV30, "NVidia GeForce FX 5700VE"}, \ {0x10DE, 0x0345, NV30, "NVidia 0x0345"}, \ {0x10DE, 0x0347, NV30, "NVidia GeForce FX Go5700"}, \ {0x10DE, 0x0348, NV30, "NVidia GeForce FX Go5700"}, \ {0x10DE, 0x0349, NV30, "NVidia 0x0349"}, \ {0x10DE, 0x034B, NV30, "NVidia 0x034B"}, \ {0x10DE, 0x034C, NV30, "NVidia Quadro FX Go1000"}, \ {0x10DE, 0x034E, NV30, "NVidia Quadro FX 1100"}, \ {0x10DE, 0x034F, NV30, "NVidia 0x034F"}, \ {0, 0, 0, NULL} #define r128_PCI_IDS \ {0x1002, 0x4C45, 0, "ATI Rage 128 Mobility LE (PCI)"}, \ {0x1002, 0x4C46, 0, "ATI Rage 128 Mobility LF (AGP)"}, \ {0x1002, 0x4D46, 0, "ATI Rage 128 Mobility MF (AGP)"}, \ {0x1002, 0x4D4C, 0, "ATI Rage 128 Mobility ML (AGP)"}, \ {0x1002, 0x5041, 0, "ATI Rage 128 Pro PA (PCI)"}, \ {0x1002, 0x5042, 0, "ATI Rage 128 Pro PB (AGP)"}, \ {0x1002, 0x5043, 0, "ATI Rage 128 Pro PC (AGP)"}, \ {0x1002, 0x5044, 0, "ATI Rage 128 Pro PD (PCI)"}, \ {0x1002, 0x5045, 0, "ATI Rage 128 Pro PE (AGP)"}, \ {0x1002, 0x5046, 0, "ATI Rage 128 Pro PF (AGP)"}, \ {0x1002, 0x5047, 0, "ATI Rage 128 Pro PG (PCI)"}, \ {0x1002, 0x5048, 0, "ATI Rage 128 Pro PH (AGP)"}, \ {0x1002, 0x5049, 0, "ATI Rage 128 Pro PI (AGP)"}, \ {0x1002, 0x504A, 0, "ATI Rage 128 Pro PJ (PCI)"}, \ {0x1002, 0x504B, 0, "ATI Rage 128 Pro PK (AGP)"}, \ {0x1002, 0x504C, 0, "ATI Rage 128 Pro PL (AGP)"}, \ {0x1002, 0x504D, 0, "ATI Rage 128 Pro PM (PCI)"}, \ {0x1002, 0x504E, 0, "ATI Rage 128 Pro PN (AGP)"}, \ {0x1002, 0x504F, 0, "ATI Rage 128 Pro PO (AGP)"}, \ {0x1002, 0x5050, 0, "ATI Rage 128 Pro PP (PCI)"}, \ {0x1002, 0x5051, 0, "ATI Rage 128 Pro PQ (AGP)"}, \ {0x1002, 0x5052, 0, "ATI Rage 128 Pro PR (PCI)"}, \ {0x1002, 0x5053, 0, "ATI Rage 128 Pro PS (PCI)"}, \ {0x1002, 0x5054, 0, "ATI Rage 128 Pro PT (AGP)"}, \ {0x1002, 0x5055, 0, "ATI Rage 128 Pro PU (AGP)"}, \ {0x1002, 0x5056, 0, "ATI Rage 128 Pro PV (PCI)"}, \ {0x1002, 0x5057, 0, "ATI Rage 128 Pro PW (AGP)"}, \ {0x1002, 0x5058, 0, "ATI Rage 128 Pro PX (AGP)"}, \ {0x1002, 0x5245, 0, "ATI Rage 128 RE (PCI)"}, \ {0x1002, 0x5246, 0, "ATI Rage 128 RF (AGP)"}, \ {0x1002, 0x5247, 0, "ATI Rage 128 RG (AGP)"}, \ {0x1002, 0x524B, 0, "ATI Rage 128 RK (PCI)"}, \ {0x1002, 0x524C, 0, "ATI Rage 128 RL (AGP)"}, \ {0x1002, 0x534D, 0, "ATI Rage 128 SM (AGP)"}, \ {0x1002, 0x5446, 0, "ATI Rage 128 Pro Ultra TF (AGP)"}, \ {0x1002, 0x544C, 0, "ATI Rage 128 Pro Ultra TL (AGP)"}, \ {0x1002, 0x5452, 0, "ATI Rage 128 Pro Ultra TR (AGP)"}, \ {0, 0, 0, NULL} #define radeon_PCI_IDS \ {0x1002, 0x3150, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X600 M24"}, \ {0x1002, 0x3151, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "M24 [FireMV 2400]"}, \ {0x1002, 0x3152, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X300 M24"}, \ {0x1002, 0x3154, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FireGL M24 GL"}, \ {0x1002, 0x3155, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "RV380 
[FireMV 2400]"}, \ {0x1002, 0x3E50, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV380 X600"}, \ {0x1002, 0x3E54, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3200 RV380"}, \ {0x1002, 0x4136, CHIP_RS100|RADEON_IS_IGP, "ATI Radeon RS100 IGP 320"}, \ {0x1002, 0x4137, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS200 IGP 340"}, \ {0x1002, 0x4144, CHIP_R300, "ATI Radeon AD 9500"}, \ {0x1002, 0x4145, CHIP_R300, "ATI Radeon AE 9700 Pro"}, \ {0x1002, 0x4146, CHIP_R300, "ATI Radeon AF R300 9600TX"}, \ {0x1002, 0x4147, CHIP_R300, "ATI FireGL AG Z1"}, \ {0x1002, 0x4148, CHIP_R350, "ATI Radeon AH 9800 SE"}, \ {0x1002, 0x4149, CHIP_R350, "ATI Radeon AI 9800"}, \ {0x1002, 0x414A, CHIP_R350, "ATI Radeon AJ 9800"}, \ {0x1002, 0x414B, CHIP_R350, "ATI FireGL AK X2"}, \ {0x1002, 0x4150, CHIP_RV350, "ATI Radeon AP 9600"}, \ {0x1002, 0x4151, CHIP_RV350, "ATI Radeon AQ 9600 SE"}, \ {0x1002, 0x4152, CHIP_RV350, "ATI Radeon AR 9600 XT"}, \ {0x1002, 0x4153, CHIP_RV350, "ATI Radeon AS 9550"}, \ {0x1002, 0x4154, CHIP_RV350, "ATI FireGL AT T2"}, \ {0x1002, 0x4155, CHIP_RV350, "ATI Radeon 9650"}, \ {0x1002, 0x4156, CHIP_RV350, "ATI FireGL AV RV360 T2"}, \ {0x1002, 0x4237, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS250 IGP"}, \ {0x1002, 0x4242, CHIP_R200, "ATI Radeon BB R200 AIW 8500DV"}, \ {0x1002, 0x4243, CHIP_R200, "ATI Radeon BC R200"}, \ {0x1002, 0x4336, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS100 Mobility U1"}, \ {0x1002, 0x4337, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS200 Mobility IGP 340M"}, \ {0x1002, 0x4437, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS250 Mobility IGP"}, \ {0x1002, 0x4966, CHIP_RV250, "ATI Radeon If RV250 9000"}, \ {0x1002, 0x4967, CHIP_RV250, "ATI Radeon Ig RV250 9000"}, \ {0x1002, 0x4A48, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JH R420 X800"}, \ {0x1002, 0x4A49, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JI R420 X800 Pro"}, \ {0x1002, 0x4A4A, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JJ R420 X800 SE"}, \ {0x1002, 0x4A4B, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JK R420 X800 XT"}, \ {0x1002, 0x4A4C, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JL R420 X800"}, \ {0x1002, 0x4A4D, CHIP_R420|RADEON_NEW_MEMMAP, "ATI FireGL JM X3-256"}, \ {0x1002, 0x4A4E, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon JN R420 Mobility M18"}, \ {0x1002, 0x4A4F, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JO R420 X800 SE"}, \ {0x1002, 0x4A50, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JP R420 X800 XT PE"}, \ {0x1002, 0x4A54, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JT R420 AIW X800 VE"}, \ {0x1002, 0x4B48, CHIP_R420|RADEON_NEW_MEMMAP, "R481 [Radeon X850 PCIe]"}, \ {0x1002, 0x4B49, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 XT"}, \ {0x1002, 0x4B4A, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 SE"}, \ {0x1002, 0x4B4B, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 Pro"}, \ {0x1002, 0x4B4C, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 XT PE"}, \ {0x1002, 0x4C57, CHIP_RV200|RADEON_IS_MOBILITY, "ATI Radeon LW RV200 Mobility 7500 M7"}, \ {0x1002, 0x4C58, CHIP_RV200|RADEON_IS_MOBILITY, "ATI Radeon LX RV200 Mobility FireGL 7800 M7"}, \ {0x1002, 0x4C59, CHIP_RV100|RADEON_IS_MOBILITY, "ATI Radeon LY RV100 Mobility M6"}, \ {0x1002, 0x4C5A, CHIP_RV100|RADEON_IS_MOBILITY, "ATI Radeon LZ RV100 Mobility M6"}, \ {0x1002, 0x4C64, CHIP_RV250|RADEON_IS_MOBILITY, "ATI Radeon Ld RV250 Mobility 9000 M9"}, \ {0x1002, 0x4C66, CHIP_RV250, "ATI Radeon Lf RV250 Mobility 9000 M9 / FireMV 2400 PCI"}, \ {0x1002, 0x4C67, CHIP_RV250|RADEON_IS_MOBILITY, "ATI Radeon Lg RV250 Mobility 
9000 M9"}, \ {0x1002, 0x4C6E, CHIP_RV280|RADEON_IS_MOBILITY, "Radeon RV250 Ln [Radeon Mobility 9000 M9] (Secondary)"}, \ {0x1002, 0x4E44, CHIP_R300, "ATI Radeon ND R300 9700 Pro"}, \ {0x1002, 0x4E45, CHIP_R300, "ATI Radeon NE R300 9500 Pro / 9700"}, \ {0x1002, 0x4E46, CHIP_R300, "ATI Radeon NF R300 9600TX"}, \ {0x1002, 0x4E47, CHIP_R300, "ATI Radeon NG R300 FireGL X1"}, \ {0x1002, 0x4E48, CHIP_R350, "ATI Radeon NH R350 9800 Pro"}, \ {0x1002, 0x4E49, CHIP_R350, "ATI Radeon NI R350 9800"}, \ {0x1002, 0x4E4A, CHIP_R350, "ATI Radeon NJ R360 9800 XT"}, \ {0x1002, 0x4E4B, CHIP_R350, "ATI FireGL NK X2"}, \ {0x1002, 0x4E50, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NP"}, \ {0x1002, 0x4E51, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NQ"}, \ {0x1002, 0x4E52, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M11 NR"}, \ {0x1002, 0x4E53, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NS"}, \ {0x1002, 0x4E54, CHIP_RV350|RADEON_IS_MOBILITY, "ATI FireGL T2/T2e"}, \ {0x1002, 0x4E56, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon Mobility 9550"}, \ {0x1002, 0x5144, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QD R100"}, \ {0x1002, 0x5145, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QE R100"}, \ {0x1002, 0x5146, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QF R100"}, \ {0x1002, 0x5147, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QG R100"}, \ {0x1002, 0x5148, CHIP_R200, "ATI Radeon QH R200 8500"}, \ {0x1002, 0x514C, CHIP_R200, "ATI Radeon QL R200 8500 LE"}, \ {0x1002, 0x514D, CHIP_R200, "ATI Radeon QM R200 9100"}, \ {0x1002, 0x5157, CHIP_RV200, "ATI Radeon QW RV200 7500"}, \ {0x1002, 0x5158, CHIP_RV200, "ATI Radeon QX RV200 7500"}, \ {0x1002, 0x5159, CHIP_RV100, "ATI Radeon QY RV100 7000/VE"}, \ {0x1002, 0x515A, CHIP_RV100, "ATI Radeon QZ RV100 7000/VE"}, \ {0x1002, 0x515E, CHIP_RV100, "ATI ES1000 RN50"}, \ {0x1002, 0x5460, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X300 M22"}, \ {0x1002, 0x5462, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X600 SE M24C"}, \ {0x1002, 0x5464, CHIP_RV380|RADEON_IS_MOBILITY, "ATI FireGL M22 GL 5464"}, \ {0x1002, 0x5548, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800"}, \ {0x1002, 0x5549, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 Pro"}, \ {0x1002, 0x554A, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 XT PE"}, \ {0x1002, 0x554B, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 SE"}, \ {0x1002, 0x554C, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 XTP"}, \ {0x1002, 0x554D, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 XL"}, \ {0x1002, 0x554E, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 SE"}, \ {0x1002, 0x554F, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800"}, \ {0x1002, 0x5550, CHIP_R423|RADEON_NEW_MEMMAP, "ATI FireGL V7100 R423"}, \ {0x1002, 0x5551, CHIP_R423|RADEON_NEW_MEMMAP, "ATI FireGL V5100 R423 UQ"}, \ {0x1002, 0x5552, CHIP_R423|RADEON_NEW_MEMMAP, "ATI FireGL unknown R423 UR"}, \ {0x1002, 0x5554, CHIP_R423|RADEON_NEW_MEMMAP, "ATI FireGL unknown R423 UT"}, \ {0x1002, 0x564A, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \ {0x1002, 0x564B, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \ {0x1002, 0x564F, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 XL M26"}, \ {0x1002, 0x5652, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \ {0x1002, 0x5653, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 
M26"}, \ {0x1002, 0x5657, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon X550XTX"}, \ {0x1002, 0x5834, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 9100 IGP"}, \ {0x1002, 0x5835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS300 Mobility IGP"}, \ {0x1002, 0x5954, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI RS480 XPRESS 200G"}, \ {0x1002, 0x5955, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5955"}, \ {0x1002, 0x5960, CHIP_RV280, "ATI Radeon RV280 9250"}, \ {0x1002, 0x5961, CHIP_RV280, "ATI Radeon RV280 9200"}, \ {0x1002, 0x5962, CHIP_RV280, "ATI Radeon RV280 9200"}, \ {0x1002, 0x5964, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \ {0x1002, 0x5965, CHIP_RV280, "ATI FireMV 2200 PCI"}, \ {0x1002, 0x5969, CHIP_RV100, "ATI ES1000 RN50"}, \ {0x1002, 0x5974, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS482 XPRESS 200"}, \ {0x1002, 0x5975, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS485 XPRESS 1100 IGP"}, \ {0x1002, 0x5A41, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200 5A41 (PCIE)"}, \ {0x1002, 0x5A42, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5A42 (PCIE)"}, \ {0x1002, 0x5A61, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200"}, \ {0x1002, 0x5A62, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200M"}, \ {0x1002, 0x5B60, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X300 SE"}, \ {0x1002, 0x5B62, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X600 Pro"}, \ {0x1002, 0x5B63, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X550"}, \ {0x1002, 0x5B64, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3100 (RV370) 5B64"}, \ {0x1002, 0x5B65, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireMV 2200 PCIE (RV370) 5B65"}, \ {0x1002, 0x5C61, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \ {0x1002, 0x5C63, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \ {0x1002, 0x5D48, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 XT M28"}, \ {0x1002, 0x5D49, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5100 M28"}, \ {0x1002, 0x5D4A, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 M28"}, \ {0x1002, 0x5D4C, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850"}, \ {0x1002, 0x5D4D, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT PE"}, \ {0x1002, 0x5D4E, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 SE"}, \ {0x1002, 0x5D4F, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 Pro"}, \ {0x1002, 0x5D50, CHIP_R423|RADEON_NEW_MEMMAP, "ATI unknown Radeon / FireGL R480"}, \ {0x1002, 0x5D52, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT"}, \ {0x1002, 0x5D57, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 XT"}, \ {0x1002, 0x5E48, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI FireGL V5000 RV410"}, \ {0x1002, 0x5E4A, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 XT"}, \ {0x1002, 0x5E4B, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 Pro"}, \ {0x1002, 0x5E4C, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \ {0x1002, 0x5E4D, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700"}, \ {0x1002, 0x5E4F, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \ {0x1002, 0x6700, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL XT [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6701, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL XT [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ 
{0x1002, 0x6702, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL XT [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6703, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL XT [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6704, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman PRO GL [FirePro V7900]"}, \ {0x1002, 0x6705, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL PRO [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6706, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6707, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman LE GL [FirePro V5900]"}, \ {0x1002, 0x6708, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6709, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6718, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman XT [Radeon HD 6970]"}, \ {0x1002, 0x6719, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman PRO [Radeon HD 6950]"}, \ {0x1002, 0x671C, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Antilles [Radeon HD 6990]"}, \ {0x1002, 0x671D, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Antilles [AMD Radeon HD 6990]"}, \ {0x1002, 0x671F, CHIP_CAYMAN|RADEON_NEW_MEMMAP, "Cayman [Radeon HD 6900 Series]"}, \ {0x1002, 0x6720, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Blackcomb [Radeon HD 6900M series]"}, \ {0x1002, 0x6721, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Blackcomb [Mobility Radeon HD 6000 series]"}, \ {0x1002, 0x6722, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6723, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6724, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Blackcomb [Mobility Radeon HD 6000 series]"}, \ {0x1002, 0x6725, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Blackcomb [Mobility Radeon HD 6000 series]"}, \ {0x1002, 0x6726, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6727, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6728, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6729, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6738, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts XT [Radeon HD 6800 Series]"}, \ {0x1002, 0x6739, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts PRO [Radeon HD 6800 Series]"}, \ {0x1002, 0x673E, CHIP_BARTS|RADEON_NEW_MEMMAP, "Barts LE [AMD Radeon HD 6700 Series]"}, \ {0x1002, 0x6740, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler XT [AMD Radeon HD 6700M Series]"}, \ {0x1002, 0x6741, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler [AMD Radeon HD 6600M Series]"}, \ {0x1002, 0x6742, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler LE [AMD Radeon HD 6625M Graphics]"}, \ {0x1002, 0x6743, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler [Radeon E6760]"}, \ {0x1002, 0x6744, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler [ATI Mobility Radeon HD 6000 series]"}, \ {0x1002, 0x6745, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Whistler"}, \ {0x1002, 0x6746, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6747, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6748, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 
0x6749, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [FirePro V4900]"}, \ {0x1002, 0x674A, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [AMD FirePro V3900]"}, \ {0x1002, 0x6750, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [AMD Radeon HD 6570]"}, \ {0x1002, 0x6751, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [Radeon HD 7600A Series]"}, \ {0x1002, 0x6758, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [Radeon HD 6670]"}, \ {0x1002, 0x6759, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [Radeon HD 6570]"}, \ {0x1002, 0x675B, CHIP_TURKS|RADEON_NEW_MEMMAP, "Unknown device name"}, \ {0x1002, 0x675D, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks [Radeon HD 7500 Series]"}, \ {0x1002, 0x675F, CHIP_TURKS|RADEON_NEW_MEMMAP, "Turks LE [Radeon HD 5500/7510 Series]"}, \ {0x1002, 0x6760, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 6400M/7400M Series]"}, \ {0x1002, 0x6761, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Seymour LP [Radeon HD 6430M]"}, \ {0x1002, 0x6762, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos GL [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6763, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Seymour [Radeon E6460]"}, \ {0x1002, 0x6764, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Seymour [Mobility Radeon HD 6000 series]"}, \ {0x1002, 0x6765, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Seymour [Mobility Radeon HD 6000 series]"}, \ {0x1002, 0x6766, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos"}, \ {0x1002, 0x6767, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos"}, \ {0x1002, 0x6768, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos"}, \ {0x1002, 0x6770, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 6400 Series]"}, \ {0x1002, 0x6771, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos"}, \ {0x1002, 0x6772, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 7400A Series]"}, \ {0x1002, 0x6778, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 7000 Series]"}, \ {0x1002, 0x6779, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 6450]"}, \ {0x1002, 0x677B, CHIP_CAICOS|RADEON_NEW_MEMMAP, "Caicos [Radeon HD 7400 Series]"}, \ {0x1002, 0x6780, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6784, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6788, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x678A, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [ATI FirePro V (FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6790, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti"}, \ {0x1002, 0x6791, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti"}, \ {0x1002, 0x6792, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti"}, \ {0x1002, 0x6798, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti XT [Radeon HD 7970]"}, \ {0x1002, 0x6799, CHIP_TAHITI|RADEON_NEW_MEMMAP, "New Zealand [Radeon HD 7990]"}, \ {0x1002, 0x679A, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti PRO [Radeon HD 7950]"}, \ {0x1002, 0x679B, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti [Radeon HD 7900 Series]"}, \ {0x1002, 0x679E, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti LE [Radeon HD 7800 Series]"}, \ {0x1002, 0x679F, CHIP_TAHITI|RADEON_NEW_MEMMAP, "Tahiti"}, \ {0x1002, 0x6800, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Wimbledon XT [Radeon HD 7970M]"}, \ {0x1002, 0x6801, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Wimbledon"}, \ {0x1002, 0x6802, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Wimbledon"}, \ {0x1002, 0x6806, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \ {0x1002, 0x6808, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn [ATI FirePro V(FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6809, 
CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn [ATI FirePro V(FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6810, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \ {0x1002, 0x6811, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \ {0x1002, 0x6816, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \ {0x1002, 0x6817, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn"}, \ {0x1002, 0x6818, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn [Radeon HD 7800]"}, \ {0x1002, 0x6819, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn PRO [Radeon HD 7800]"}, \ {0x1002, 0x6820, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \ {0x1002, 0x6821, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \ {0x1002, 0x6823, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \ {0x1002, 0x6824, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Chelsea [Radeon HD 7700M Series]"}, \ {0x1002, 0x6825, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \ {0x1002, 0x6826, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Chelsea [Radeon HD 7700M Series]"}, \ {0x1002, 0x6827, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \ {0x1002, 0x6828, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \ {0x1002, 0x6829, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \ {0x1002, 0x682B, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \ {0x1002, 0x682D, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Unknown device name"}, \ {0x1002, 0x682F, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7700M Series]"}, \ {0x1002, 0x6830, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \ {0x1002, 0x6831, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [AMD Radeon HD 7700M Series]"}, \ {0x1002, 0x6837, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde LE [Radeon HD 7700 Series]"}, \ {0x1002, 0x6838, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \ {0x1002, 0x6839, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \ {0x1002, 0x683B, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7700 Series]"}, \ {0x1002, 0x683D, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7700 Series]"}, \ {0x1002, 0x683F, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde PRO [Radeon HD 7700 Series]"}, \ {0x1002, 0x6840, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Thames XT/GL [Radeon HD 7600M Series]"}, \ {0x1002, 0x6841, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Thames [Radeon 7500M/7600M Series]"}, \ {0x1002, 0x6842, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Thames LE [Radeon HD 7000M Series]"}, \ {0x1002, 0x6843, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Thames [Radeon HD 7670M]"}, \ {0x1002, 0x6849, CHIP_TURKS|RADEON_NEW_MEMMAP, "Lombok [AMD Radeon HD 7400 Series]"}, \ {0x1002, 0x684C, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn [ATI FirePro V(FireGL V) Graphics Adapter]"}, \ {0x1002, 0x6850, CHIP_TURKS|RADEON_NEW_MEMMAP, "Lombok GL AIO [Radeon HD 7570]"}, \ {0x1002, 0x6858, CHIP_TURKS|RADEON_NEW_MEMMAP, "Lombok [Radeon HD 7400 series]"}, \ {0x1002, 0x6859, CHIP_TURKS|RADEON_NEW_MEMMAP, "Unknown device name"}, \ {0x1002, 0x6880, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cypress"}, \ {0x1002, 0x6888, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [FirePro 3D V8800]"}, \ {0x1002, 0x6889, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [FirePro V7800]"}, \ {0x1002, 0x688A, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress XT [FirePro 3D V9800]"}, \ {0x1002, 
0x688C, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [AMD FireStream 9370]"}, \ {0x1002, 0x688D, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [AMD FireStream 9350]"}, \ {0x1002, 0x6898, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress XT [Radeon HD 5870]"}, \ {0x1002, 0x6899, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress PRO [Radeon HD 5800 Series]"}, \ {0x1002, 0x689B, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress [Radeon HD 6800 Series]"}, \ {0x1002, 0x689C, CHIP_HEMLOCK|RADEON_NEW_MEMMAP, "Hemlock [Radeon HD 5900 Series]"}, \ {0x1002, 0x689D, CHIP_HEMLOCK|RADEON_NEW_MEMMAP, "Hemlock [ATI Radeon HD 5900 Series]"}, \ {0x1002, 0x689E, CHIP_CYPRESS|RADEON_NEW_MEMMAP, "Cypress LE [Radeon HD 5800 Series]"}, \ {0x1002, 0x68A0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 5870"}, \ {0x1002, 0x68A1, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Broadway PRO [Mobility Radeon HD 5800 Series]"}, \ {0x1002, 0x68A8, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Broadway [ATI Mobility Radeon HD 6800 Series]"}, \ {0x1002, 0x68A9, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper XT [FirePro 3D V5800]"}, \ {0x1002, 0x68B0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Unknown device name"}, \ {0x1002, 0x68B8, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper [Radeon HD 5700 Series]"}, \ {0x1002, 0x68B9, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper [Radeon HD 5600/5700]"}, \ {0x1002, 0x68BA, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper XT [AMD Radeon HD 6000 Series]"}, \ {0x1002, 0x68BE, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper [Radeon HD 5700 Series]"}, \ {0x1002, 0x68BF, CHIP_JUNIPER|RADEON_NEW_MEMMAP, "Juniper LE [Radeon HD 6700 Series]"}, \ {0x1002, 0x68C0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Madison [Mobility Radeon HD 5000 Series]"}, \ {0x1002, 0x68C1, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Madison [Radeon HD 5000M Series]"}, \ {0x1002, 0x68C7, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Pinewood [Radeon HD 5570]"}, \ {0x1002, 0x68C8, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "FirePro V4800"}, \ {0x1002, 0x68C9, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood [FirePro 3800 (FireGL)]"}, \ {0x1002, 0x68D8, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood [Radeon HD 5670]"}, \ {0x1002, 0x68D9, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood PRO [Radeon HD 5500 Series]"}, \ {0x1002, 0x68DA, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood PRO [Radeon HD 5500 Series]"}, \ {0x1002, 0x68DE, CHIP_REDWOOD|RADEON_NEW_MEMMAP, "Redwood"}, \ {0x1002, 0x68E0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Manhattan [Mobility Radeon HD 5400 Series]"}, \ {0x1002, 0x68E1, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Manhattan [Mobility Radeon HD 5430 Series]"}, \ {0x1002, 0x68E4, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Robson CE [AMD Radeon HD 6300 Series]"}, \ {0x1002, 0x68E5, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Robson LE [AMD Radeon HD 6300M Series]"}, \ {0x1002, 0x68E8, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar"}, \ {0x1002, 0x68E9, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar [ATI FirePro (FireGL) Graphics Adapter]"}, \ {0x1002, 0x68F1, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar [FirePro 2460]"}, \ {0x1002, 0x68F2, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar [FirePro 2270]"}, \ {0x1002, 0x68F8, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar [Radeon HD 7300 Series]"}, \ {0x1002, 0x68F9, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar PRO [Radeon HD 5450/6350]"}, \ {0x1002, 0x68FA, CHIP_CEDAR|RADEON_NEW_MEMMAP, "EG Cedar [Radeon HD 7300 Series]"}, \ {0x1002, 0x68FE, CHIP_CEDAR|RADEON_NEW_MEMMAP, "Cedar LE"}, \ {0x1002, 0x7100, 
CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \ {0x1002, 0x7101, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1800 XT"}, \ {0x1002, 0x7102, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1800"}, \ {0x1002, 0x7103, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V7200"}, \ {0x1002, 0x7104, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7200"}, \ {0x1002, 0x7105, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V5300"}, \ {0x1002, 0x7106, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V7100"}, \ {0x1002, 0x7108, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \ {0x1002, 0x7109, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \ {0x1002, 0x710A, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \ {0x1002, 0x710B, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \ {0x1002, 0x710C, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \ {0x1002, 0x710E, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7300"}, \ {0x1002, 0x710F, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7350"}, \ {0x1002, 0x7140, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \ {0x1002, 0x7141, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \ {0x1002, 0x7142, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \ {0x1002, 0x7143, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550"}, \ {0x1002, 0x7144, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M54-GL"}, \ {0x1002, 0x7145, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1400"}, \ {0x1002, 0x7146, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \ {0x1002, 0x7147, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \ {0x1002, 0x7149, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \ {0x1002, 0x714A, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \ {0x1002, 0x714B, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \ {0x1002, 0x714C, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \ {0x1002, 0x714D, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \ {0x1002, 0x714E, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \ {0x1002, 0x714F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \ {0x1002, 0x7151, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \ {0x1002, 0x7152, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireGL V3300"}, \ {0x1002, 0x7153, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireGL V3350"}, \ {0x1002, 0x715E, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \ {0x1002, 0x715F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \ {0x1002, 0x7180, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \ {0x1002, 0x7181, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \ {0x1002, 0x7183, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \ {0x1002, 0x7186, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1450"}, \ {0x1002, 0x7187, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \ {0x1002, 0x7188, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X2300"}, \ {0x1002, 0x718A, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X2300"}, \ {0x1002, 0x718B, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \ {0x1002, 0x718C, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \ {0x1002, 0x718D, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1450"}, \ {0x1002, 0x718F, CHIP_RV515|RADEON_NEW_MEMMAP, 
"ATI Radeon X1300"}, \ {0x1002, 0x7193, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550"}, \ {0x1002, 0x7196, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \ {0x1002, 0x719B, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireMV 2250"}, \ {0x1002, 0x719F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \ {0x1002, 0x71C0, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \ {0x1002, 0x71C1, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \ {0x1002, 0x71C2, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \ {0x1002, 0x71C3, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \ {0x1002, 0x71C4, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5200"}, \ {0x1002, 0x71C5, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1600"}, \ {0x1002, 0x71C6, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \ {0x1002, 0x71C7, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \ {0x1002, 0x71CD, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \ {0x1002, 0x71CE, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1300 XT/X1600 Pro"}, \ {0x1002, 0x71D2, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI FireGL V3400"}, \ {0x1002, 0x71D4, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5250"}, \ {0x1002, 0x71D5, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700"}, \ {0x1002, 0x71D6, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700 XT"}, \ {0x1002, 0x71DA, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI FireGL V5200"}, \ {0x1002, 0x71DE, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700"}, \ {0x1002, 0x7200, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X2300HD"}, \ {0x1002, 0x7210, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2300"}, \ {0x1002, 0x7211, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2300"}, \ {0x1002, 0x7240, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \ {0x1002, 0x7243, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x7244, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \ {0x1002, 0x7245, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x7246, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x7247, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x7248, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x7249, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x724A, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x724B, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x724C, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x724D, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x724E, CHIP_R580|RADEON_NEW_MEMMAP, "ATI AMD Stream Processor"}, \ {0x1002, 0x724F, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \ {0x1002, 0x7280, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \ {0x1002, 0x7281, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \ {0x1002, 0x7283, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \ {0x1002, 0x7284, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1900"}, \ {0x1002, 0x7287, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \ {0x1002, 0x7288, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI Radeon X1950 GT"}, \ {0x1002, 0x7289, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI RV570"}, \ {0x1002, 0x728B, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI RV570"}, \ {0x1002, 0x728C, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI ATI FireGL V7400"}, \ {0x1002, 0x7290, 
CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \ {0x1002, 0x7291, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \ {0x1002, 0x7293, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \ {0x1002, 0x7297, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \ {0x1002, 0x7834, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon RS350 9000/9100 IGP"}, \ {0x1002, 0x7835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon RS350 Mobility IGP"}, \ {0x1002, 0x791E, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1250 IGP"}, \ {0x1002, 0x791F, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1270 IGP"}, \ {0x1002, 0x793F, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \ {0x1002, 0x7941, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \ {0x1002, 0x7942, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \ {0x1002, 0x796C, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \ {0x1002, 0x796D, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \ {0x1002, 0x796E, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \ {0x1002, 0x796F, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \ {0x1002, 0x9400, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \ {0x1002, 0x9401, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \ {0x1002, 0x9402, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \ {0x1002, 0x9403, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 Pro"}, \ {0x1002, 0x9405, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 GT"}, \ {0x1002, 0x940A, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V8650"}, \ {0x1002, 0x940B, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V8600"}, \ {0x1002, 0x940F, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V7600"}, \ {0x1002, 0x9440, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \ {0x1002, 0x9441, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4870 X2"}, \ {0x1002, 0x9442, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \ {0x1002, 0x9443, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4850 X2"}, \ {0x1002, 0x9444, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V8750 (FireGL)"}, \ {0x1002, 0x9446, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V7760 (FireGL)"}, \ {0x1002, 0x944A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850"}, \ {0x1002, 0x944B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850 X2"}, \ {0x1002, 0x944C, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \ {0x1002, 0x944E, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro RV770"}, \ {0x1002, 0x9450, CHIP_RV770|RADEON_NEW_MEMMAP, "AMD FireStream 9270"}, \ {0x1002, 0x9452, CHIP_RV770|RADEON_NEW_MEMMAP, "AMD FireStream 9250"}, \ {0x1002, 0x9456, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V8700 (FireGL)"}, \ {0x1002, 0x945A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4870"}, \ {0x1002, 0x945B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon M98"}, \ {0x1002, 0x945E, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "RV770"}, \ {0x1002, 0x9460, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \ {0x1002, 0x9462, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \ {0x1002, 0x946A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M7750"}, \ {0x1002, 0x946B, 
CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \ {0x1002, 0x947A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \ {0x1002, 0x947B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \ {0x1002, 0x9480, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4650"}, \ {0x1002, 0x9487, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon RV730 (AGP)"}, \ {0x1002, 0x9488, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4670"}, \ {0x1002, 0x9489, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M5750"}, \ {0x1002, 0x948A, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "RV730"}, \ {0x1002, 0x948F, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon RV730 (AGP)"}, \ {0x1002, 0x9490, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4670"}, \ {0x1002, 0x9491, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI RADEON E4600"}, \ {0x1002, 0x9495, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4600 Series"}, \ {0x1002, 0x9498, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4650"}, \ {0x1002, 0x949C, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V7750 (FireGL)"}, \ {0x1002, 0x949E, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V5700 (FireGL)"}, \ {0x1002, 0x949F, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V3750 (FireGL)"}, \ {0x1002, 0x94A0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4830"}, \ {0x1002, 0x94A1, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850"}, \ {0x1002, 0x94A3, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M7740"}, \ {0x1002, 0x94B1, CHIP_RV740|RADEON_NEW_MEMMAP, "ATI RV740"}, \ {0x1002, 0x94B3, CHIP_RV740|RADEON_NEW_MEMMAP, "ATI Radeon HD 4770"}, \ {0x1002, 0x94B4, CHIP_RV740|RADEON_NEW_MEMMAP, "ATI Radeon HD 4700 Series"}, \ {0x1002, 0x94B5, CHIP_RV740|RADEON_NEW_MEMMAP, "ATI Radeon HD 4770"}, \ {0x1002, 0x94B9, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M5750"}, \ {0x1002, 0x94C0, CHIP_RV610|RADEON_NEW_MEMMAP, "RV610"}, \ {0x1002, 0x94C1, CHIP_RV610|RADEON_NEW_MEMMAP, "Radeon HD 2400 XT"}, \ {0x1002, 0x94C3, CHIP_RV610|RADEON_NEW_MEMMAP, "Radeon HD 2400 Pro"}, \ {0x1002, 0x94C4, CHIP_RV610|RADEON_NEW_MEMMAP, "Radeon HD 2400 PRO AGP"}, \ {0x1002, 0x94C5, CHIP_RV610|RADEON_NEW_MEMMAP, "FireGL V4000"}, \ {0x1002, 0x94C6, CHIP_RV610|RADEON_NEW_MEMMAP, "RV610"}, \ {0x1002, 0x94C7, CHIP_RV610|RADEON_NEW_MEMMAP, "ATI Radeon HD 2350"}, \ {0x1002, 0x94C8, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2400 XT"}, \ {0x1002, 0x94C9, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2400"}, \ {0x1002, 0x94CB, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI RADEON E2400"}, \ {0x1002, 0x94CC, CHIP_RV610|RADEON_NEW_MEMMAP, "ATI RV610"}, \ {0x1002, 0x94CD, CHIP_RV610|RADEON_NEW_MEMMAP, "ATI FireMV 2260"}, \ {0x1002, 0x9500, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI RV670"}, \ {0x1002, 0x9501, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3870"}, \ {0x1002, 0x9504, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3850"}, \ {0x1002, 0x9505, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3850"}, \ {0x1002, 0x9506, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3850 X2"}, \ {0x1002, 0x9507, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI RV670"}, \ {0x1002, 0x9508, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3870"}, \ {0x1002, 0x9509, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3870 X2"}, \ {0x1002, 0x950F, 
CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3870 X2"}, \ {0x1002, 0x9511, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI FireGL V7700"}, \ {0x1002, 0x9515, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3850"}, \ {0x1002, 0x9517, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3690"}, \ {0x1002, 0x9519, CHIP_RV670|RADEON_NEW_MEMMAP, "AMD Firestream 9170"}, \ {0x1002, 0x9540, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon HD 4550"}, \ {0x1002, 0x9541, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \ {0x1002, 0x9542, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \ {0x1002, 0x954E, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \ {0x1002, 0x954F, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon HD 4350"}, \ {0x1002, 0x9552, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4300 Series"}, \ {0x1002, 0x9553, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4500 Series"}, \ {0x1002, 0x9555, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4500 Series"}, \ {0x1002, 0x9557, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro RG220"}, \ {0x1002, 0x955F, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "RV710 [Mobility Radeon HD 4330]"}, \ {0x1002, 0x9580, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI RV630"}, \ {0x1002, 0x9581, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2600"}, \ {0x1002, 0x9583, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2600 XT"}, \ {0x1002, 0x9586, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 XT AGP"}, \ {0x1002, 0x9587, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 Pro AGP"}, \ {0x1002, 0x9588, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 XT"}, \ {0x1002, 0x9589, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 Pro"}, \ {0x1002, 0x958A, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Gemini RV630"}, \ {0x1002, 0x958B, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Gemini Mobility Radeon HD 2600 XT"}, \ {0x1002, 0x958C, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI FireGL V5600"}, \ {0x1002, 0x958D, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI FireGL V3600"}, \ {0x1002, 0x958E, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 LE"}, \ {0x1002, 0x958F, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL Graphics Processor"}, \ {0x1002, 0x9590, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3600 Series"}, \ {0x1002, 0x9591, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3650"}, \ {0x1002, 0x9593, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3670"}, \ {0x1002, 0x9595, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5700"}, \ {0x1002, 0x9596, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3650 AGP"}, \ {0x1002, 0x9597, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3600 PRO"}, \ {0x1002, 0x9598, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3600 XT"}, \ {0x1002, 0x9599, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI ATI Radeon HD 3600 PRO"}, \ {0x1002, 0x959B, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5725"}, \ {0x1002, 0x95C0, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3470"}, \ {0x1002, 0x95C2, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3430"}, \ {0x1002, 0x95C4, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3400 Series"}, \ {0x1002, 0x95C5, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \ {0x1002, 0x95C6, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \ {0x1002, 0x95C7, CHIP_RV620|RADEON_NEW_MEMMAP, 
"ATI Radeon HD 3430"}, \ {0x1002, 0x95C9, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \ {0x1002, 0x95CC, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FirePro V3700"}, \ {0x1002, 0x95CD, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2450"}, \ {0x1002, 0x95CE, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2260"}, \ {0x1002, 0x95CF, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2260"}, \ {0x1002, 0x9610, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 3200 Graphics"}, \ {0x1002, 0x9611, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3100 Graphics"}, \ {0x1002, 0x9612, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 3200 Graphics"}, \ {0x1002, 0x9613, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3100 Graphics"}, \ {0x1002, 0x9614, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3300 Graphics"}, \ {0x1002, 0x9615, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3200 Graphics"}, \ {0x1002, 0x9616, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3000 Graphics"}, \ {0x1002, 0x9640, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "BeaverCreek [Radeon HD 6550D]"}, \ {0x1002, 0x9641, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "BeaverCreek [Mobility Radeon HD 6620G]"}, \ {0x1002, 0x9642, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6370D]"}, \ {0x1002, 0x9643, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6380G]"}, \ {0x1002, 0x9644, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6410D]"}, \ {0x1002, 0x9645, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6410D]"}, \ {0x1002, 0x9647, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "BeaverCreek [Radeon HD 6520G]"}, \ {0x1002, 0x9648, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6480G]"}, \ {0x1002, 0x9649, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo [Radeon HD 6480G]"}, \ {0x1002, 0x964A, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "BeaverCreek [Radeon HD 6530D]"}, \ {0x1002, 0x964B, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo"}, \ {0x1002, 0x964C, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo"}, \ {0x1002, 0x964E, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo"}, \ {0x1002, 0x964F, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Sumo"}, \ {0x1002, 0x9710, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 4200"}, \ {0x1002, 0x9711, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 4100"}, \ {0x1002, 0x9712, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Mobility Radeon HD 4200"}, \ {0x1002, 0x9713, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Mobility Radeon 4100"}, \ {0x1002, 0x9714, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI RS880"}, \ {0x1002, 0x9715, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 4250"}, \ {0x1002, 0x9802, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6310]"}, \ {0x1002, 0x9803, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6310]"}, \ {0x1002, 0x9804, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6250]"}, \ {0x1002, 0x9805, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6250]"}, \ {0x1002, 0x9806, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6320]"}, \ {0x1002, 0x9807, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 6290]"}, \ {0x1002, 0x9808, 
CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 7340]"}, \ {0x1002, 0x9809, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 7310]"}, \ {0x1002, 0x980A, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Wrestler [Radeon HD 7290]"}, \ {0x1002, 0x9900, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Unknown device name"}, \ {0x1002, 0x9901, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7660D]"}, \ {0x1002, 0x9903, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7640G]"}, \ {0x1002, 0x9904, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7560D]"}, \ {0x1002, 0x9905, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [FirePro A300 Series Graphics]"}, \ {0x1002, 0x9906, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [FirePro A300 Series Graphics]"}, \ {0x1002, 0x9907, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7620G]"}, \ {0x1002, 0x9908, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7600G]"}, \ {0x1002, 0x9909, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7500G]"}, \ {0x1002, 0x990A, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7500G]"}, \ {0x1002, 0x990F, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Unknown device name"}, \ {0x1002, 0x9910, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7660G]"}, \ {0x1002, 0x9913, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7640G]"}, \ {0x1002, 0x9917, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7620G]"}, \ {0x1002, 0x9918, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7600G]"}, \ {0x1002, 0x9919, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7500G]"}, \ {0x1002, 0x9990, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7520G]"}, \ {0x1002, 0x9991, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7540D]"}, \ {0x1002, 0x9992, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7420G]"}, \ {0x1002, 0x9993, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7480D]"}, \ {0x1002, 0x9994, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7400G]"}, \ {0x1002, 0x99A0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7520G]"}, \ {0x1002, 0x99A2, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7420G]"}, \ {0x1002, 0x99A4, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7400G]"}, \ {0, 0, 0, NULL} #define savage_PCI_IDS \ {0x5333, 0x8A20, S3_SAVAGE3D, "Savage 3D"}, \ {0x5333, 0x8A21, S3_SAVAGE3D, "Savage 3D/MV"}, \ {0x5333, 0x8A22, S3_SAVAGE4, "Savage4"}, \ {0x5333, 0x8A23, S3_SAVAGE4, "Savage4"}, \ {0x5333, 0x8A25, S3_PROSAVAGE, "ProSavage PM133"}, \ {0x5333, 0x8A26, S3_PROSAVAGE, "ProSavage KM133"}, \ {0x5333, 0x8C10, S3_SAVAGE_MX, "Savage/MX-MV"}, \ {0x5333, 0x8C11, S3_SAVAGE_MX, "Savage/MX"}, \ {0x5333, 0x8C12, S3_SAVAGE_MX, "Savage/IX-MV"}, \ {0x5333, 0x8C13, S3_SAVAGE_MX, "Savage/IX"}, \ {0x5333, 0x8C22, S3_SUPERSAVAGE, "SuperSavage MX/128"}, \ {0x5333, 0x8C24, S3_SUPERSAVAGE, "SuperSavage MX/64"}, \ {0x5333, 0x8C26, S3_SUPERSAVAGE, "SuperSavage MX/64C"}, \ {0x5333, 0x8C2A, S3_SUPERSAVAGE, "SuperSavage IX/128 SDR"}, \ {0x5333, 0x8C2B, S3_SUPERSAVAGE, "SuperSavage 
IX/128 DDR"}, \ {0x5333, 0x8C2C, S3_SUPERSAVAGE, "SuperSavage IX/64 SDR"}, \ {0x5333, 0x8C2D, S3_SUPERSAVAGE, "SuperSavage IX/64 DDR"}, \ {0x5333, 0x8C2E, S3_SUPERSAVAGE, "SuperSavage IX/C SDR"}, \ {0x5333, 0x8C2F, S3_SUPERSAVAGE, "SuperSavage IX/C DDR"}, \ {0x5333, 0x8D01, S3_TWISTER, "ProSavage Twister PN133"}, \ {0x5333, 0x8D02, S3_TWISTER, "ProSavage Twister KN133"}, \ {0x5333, 0x8D03, S3_PROSAVAGEDDR, "ProSavage DDR"}, \ {0x5333, 0x8D04, S3_PROSAVAGEDDR, "ProSavage DDR-K"}, \ {0, 0, 0, NULL} #define sis_PCI_IDS \ {0x18CA, 0x0040, SIS_CHIP_315, "Volari V3XT/V5/V8"}, \ {0x18CA, 0x0042, SIS_CHIP_315, "Volari Unknown"}, \ {0x1039, 0x0300, 0, "SiS 300/305"}, \ {0x1039, 0x5300, 0, "SiS 540"}, \ {0x1039, 0x6300, 0, "SiS 630"}, \ {0x1039, 0x6330, SIS_CHIP_315, "SiS 661"}, \ {0x1039, 0x7300, 0, "SiS 730"}, \ {0, 0, 0, NULL} #define tdfx_PCI_IDS \ {0x121A, 0x0003, 0, "3dfx Voodoo Banshee"}, \ {0x121A, 0x0004, 0, "3dfx Voodoo3 2000"}, \ {0x121A, 0x0005, 0, "3dfx Voodoo3 3000"}, \ {0x121A, 0x0007, 0, "3dfx Voodoo4 4500"}, \ {0x121A, 0x0009, 0, "3dfx Voodoo5 5500"}, \ {0x121A, 0x000B, 0, "3dfx Voodoo4 4200"}, \ {0, 0, 0, NULL} #define viadrv_PCI_IDS \ {0x1106, 0x3022, 0, "VIA CLE266 3022"}, \ {0x1106, 0x3108, 0, "VIA K8M800"}, \ {0x1106, 0x3118, VIA_PRO_GROUP_A, "VIA CN400 / PM8X0"}, \ {0x1106, 0x3122, 0, "VIA CLE266"}, \ {0x1106, 0x3157, VIA_PRO_GROUP_A, "VIA CX700"}, \ {0x1106, 0x3230, VIA_DX9_0, "VIA K8M890"}, \ {0x1106, 0x3343, 0, "VIA P4M890"}, \ {0x1106, 0x3344, 0, "VIA CN700 / VM800 / P4M800Pro"}, \ {0x1106, 0x3371, VIA_DX9_0, "VIA P4M900 / VN896"}, \ {0x1106, 0x7205, 0, "VIA KM400"}, \ {0, 0, 0, NULL} #define xgi_PCI_IDS \ {0x18CA, 0x0047, 0, "XP10 / XG47"}, \ {0x18CA, 0x2200, 0, "XP5"}, \ {0, 0, 0, NULL} Index: head/sys/dev/drm2/drm_stub.c =================================================================== --- head/sys/dev/drm2/drm_stub.c (revision 277486) +++ head/sys/dev/drm2/drm_stub.c (revision 277487) @@ -1,60 +1,60 @@ /** * \file drm_stub.h * Stub support * * \author Rickard E. (Rik) Faith */ /* * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org * * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include "drmP.h" int drm_setmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { DRM_DEBUG("setmaster\n"); if (file_priv->master != 0) return (0); - return (-EPERM); + return (EPERM); } int drm_dropmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { DRM_DEBUG("dropmaster\n"); if (file_priv->master != 0) - return (-EINVAL); + return (EINVAL); return (0); } Index: head/sys/dev/drm2/i915/i915_debug.c =================================================================== --- head/sys/dev/drm2/i915/i915_debug.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_debug.c (revision 277487) @@ -1,1683 +1,1769 @@ /* * Copyright © 2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Eric Anholt * Keith Packard * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include enum { ACTIVE_LIST, FLUSHING_LIST, INACTIVE_LIST, PINNED_LIST, - DEFERRED_FREE_LIST, }; static const char * yesno(int v) { return (v ? 
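The drm_stub.c hunk above is purely an errno-convention fix: FreeBSD ioctl handlers report failure with a positive errno value, whereas the Linux-derived code returned the negated constants. A small self-contained illustration of that convention, using a hypothetical check_master() stand-in for drm_setmaster_ioctl():

/*
 * Illustration only: a FreeBSD ioctl path hands a positive errno straight
 * back to userland, so returning -EPERM would be misreported.
 * check_master() is a hypothetical stand-in, not the driver function.
 */
#include <errno.h>
#include <stdio.h>

static int
check_master(int is_master)
{

	if (is_master)
		return (0);
	return (EPERM);		/* positive errno, FreeBSD convention */
}

int
main(void)
{
	int error = check_master(0);

	if (error != 0)
		printf("ioctl would fail with errno %d (EPERM = %d)\n",
		    error, EPERM);
	return (0);
}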
"yes" : "no"); } static int i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data) { const struct intel_device_info *info = INTEL_INFO(dev); sbuf_printf(m, "gen: %d\n", info->gen); if (HAS_PCH_SPLIT(dev)) sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); #define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x)) B(is_mobile); B(is_i85x); B(is_i915g); B(is_i945gm); B(is_g33); B(need_gfx_hws); B(is_g4x); B(is_pineview); B(has_fbc); B(has_pipe_cxsr); B(has_hotplug); B(cursor_needs_physical); B(has_overlay); B(overlay_needs_physical); B(supports_tv); B(has_bsd_ring); B(has_blt_ring); B(has_llc); #undef B return (0); } static const char * get_pin_flag(struct drm_i915_gem_object *obj) { if (obj->user_pin_count > 0) return "P"; else if (obj->pin_count > 0) return "p"; else return " "; } static const char * get_tiling_flag(struct drm_i915_gem_object *obj) { switch (obj->tiling_mode) { default: case I915_TILING_NONE: return (" "); case I915_TILING_X: return ("X"); case I915_TILING_Y: return ("Y"); } } static const char * cache_level_str(int type) { switch (type) { case I915_CACHE_NONE: return " uncached"; case I915_CACHE_LLC: return " snooped (LLC)"; case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; default: return (""); } } static void describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj) { sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), obj->base.size / 1024, obj->base.read_domains, obj->base.write_domain, obj->last_rendering_seqno, obj->last_fenced_seqno, cache_level_str(obj->cache_level), obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); if (obj->base.name) sbuf_printf(m, " (name: %d)", obj->base.name); + if (obj->pin_display) + sbuf_printf(m, " (display)"); if (obj->fence_reg != I915_FENCE_REG_NONE) sbuf_printf(m, " (fence: %d)", obj->fence_reg); if (obj->gtt_space != NULL) sbuf_printf(m, " (gtt offset: %08x, size: %08x)", obj->gtt_offset, (unsigned int)obj->gtt_space->size); if (obj->pin_mappable || obj->fault_mappable) { char s[3], *t = s; if (obj->pin_mappable) *t++ = 'p'; if (obj->fault_mappable) *t++ = 'f'; *t = '\0'; sbuf_printf(m, " (%s mappable)", s); } if (obj->ring != NULL) sbuf_printf(m, " (%s)", obj->ring->name); } static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data) { uintptr_t list = (uintptr_t)data; struct list_head *head; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; size_t total_obj_size, total_gtt_size; int count; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); switch (list) { case ACTIVE_LIST: sbuf_printf(m, "Active:\n"); head = &dev_priv->mm.active_list; break; case INACTIVE_LIST: sbuf_printf(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; break; - case PINNED_LIST: - sbuf_printf(m, "Pinned:\n"); - head = &dev_priv->mm.pinned_list; - break; case FLUSHING_LIST: sbuf_printf(m, "Flushing:\n"); head = &dev_priv->mm.flushing_list; break; - case DEFERRED_FREE_LIST: - sbuf_printf(m, "Deferred free:\n"); - head = &dev_priv->mm.deferred_free_list; - break; default: DRM_UNLOCK(dev); return (EINVAL); } total_obj_size = total_gtt_size = count = 0; list_for_each_entry(obj, head, mm_list) { sbuf_printf(m, " "); describe_obj(m, obj); sbuf_printf(m, "\n"); total_obj_size += obj->base.size; total_gtt_size += obj->gtt_space->size; count++; } DRM_UNLOCK(dev); sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", count, total_obj_size, total_gtt_size); return (0); } #define 
count_objects(list, member) do { \ list_for_each_entry(obj, list, member) { \ size += obj->gtt_space->size; \ ++count; \ if (obj->map_and_fenceable) { \ mappable_size += obj->gtt_space->size; \ ++mappable_count; \ } \ } \ } while (0) static int i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data) { struct drm_i915_private *dev_priv = dev->dev_private; u32 count, mappable_count; size_t size, mappable_size; struct drm_i915_gem_object *obj; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); sbuf_printf(m, "%u objects, %zu bytes\n", dev_priv->mm.object_count, dev_priv->mm.object_memory); size = count = mappable_size = mappable_count = 0; count_objects(&dev_priv->mm.gtt_list, gtt_list); sbuf_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; count_objects(&dev_priv->mm.active_list, mm_list); count_objects(&dev_priv->mm.flushing_list, mm_list); sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.pinned_list, mm_list); - sbuf_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", - count, mappable_count, size, mappable_size); - - size = count = mappable_size = mappable_count = 0; count_objects(&dev_priv->mm.inactive_list, mm_list); sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.deferred_free_list, mm_list); - sbuf_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", - count, mappable_count, size, mappable_size); - - size = count = mappable_size = mappable_count = 0; list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { if (obj->fault_mappable) { size += obj->gtt_space->size; ++count; } if (obj->pin_mappable) { mappable_size += obj->gtt_space->size; ++mappable_count; } } sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n", mappable_count, mappable_size); sbuf_printf(m, "%u fault mappable objects, %zu bytes\n", count, size); sbuf_printf(m, "%zu [%zu] gtt total\n", dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); DRM_UNLOCK(dev); return (0); } static int -i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void* data) +i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data) { struct drm_i915_private *dev_priv = dev->dev_private; + uintptr_t list = (uintptr_t)data; struct drm_i915_gem_object *obj; size_t total_obj_size, total_gtt_size; int count; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); total_obj_size = total_gtt_size = count = 0; list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + if (list == PINNED_LIST && obj->pin_count == 0) + continue; + sbuf_printf(m, " "); describe_obj(m, obj); sbuf_printf(m, "\n"); total_obj_size += obj->base.size; total_gtt_size += obj->gtt_space->size; count++; } DRM_UNLOCK(dev); sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", count, total_obj_size, total_gtt_size); return (0); } static int i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data) { struct intel_crtc *crtc; struct drm_i915_gem_object *obj; struct intel_unpin_work *work; char pipe; char plane; if ((dev->driver->driver_features & DRIVER_MODESET) == 0) return (0); list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { pipe = pipe_name(crtc->pipe); plane = plane_name(crtc->plane); 
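Nearly every handler in i915_debug.c follows the shape seen in i915_gem_object_list_info() and i915_gem_object_info() above: take the per-device struct lock with sx_xlock_sig() so a pending signal can abort a long dump (returning EINTR), format into the caller-supplied sbuf, then DRM_UNLOCK() and return 0. A sketch of that skeleton, assuming the drmP.h/i915_drv.h types this file already includes; the handler name is invented:

/*
 * Skeleton of the recurring handler pattern in this file; not a drop-in
 * handler.  i915_example_info() is hypothetical, num_fence_regs is a real
 * field used by the handlers above.
 */
#include <sys/param.h>
#include <sys/sbuf.h>
#include <sys/sx.h>

static int
i915_example_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Interruptible lock: a pending signal aborts the dump with EINTR. */
	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	sbuf_printf(m, "fence registers: %d\n", dev_priv->num_fence_regs);

	DRM_UNLOCK(dev);
	return (0);
}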
mtx_lock(&dev->event_lock); work = crtc->unpin_work; if (work == NULL) { sbuf_printf(m, "No flip due on pipe %c (plane %c)\n", pipe, plane); } else { if (!work->pending) { sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n", pipe, plane); } else { sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", pipe, plane); } if (work->enable_stall_check) sbuf_printf(m, "Stall check enabled, "); else sbuf_printf(m, "Stall check waiting for page flip ioctl, "); sbuf_printf(m, "%d prepares\n", work->pending); if (work->old_fb_obj) { obj = work->old_fb_obj; if (obj) sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } if (work->pending_flip_obj) { obj = work->pending_flip_obj; if (obj) sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } } mtx_unlock(&dev->event_lock); } return (0); } static int i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_request *gem_request; int count; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); count = 0; if (!list_empty(&dev_priv->rings[RCS].request_list)) { sbuf_printf(m, "Render requests:\n"); list_for_each_entry(gem_request, &dev_priv->rings[RCS].request_list, list) { sbuf_printf(m, " %d @ %d\n", gem_request->seqno, (int) (jiffies - gem_request->emitted_jiffies)); } count++; } if (!list_empty(&dev_priv->rings[VCS].request_list)) { sbuf_printf(m, "BSD requests:\n"); list_for_each_entry(gem_request, &dev_priv->rings[VCS].request_list, list) { sbuf_printf(m, " %d @ %d\n", gem_request->seqno, (int) (jiffies - gem_request->emitted_jiffies)); } count++; } if (!list_empty(&dev_priv->rings[BCS].request_list)) { sbuf_printf(m, "BLT requests:\n"); list_for_each_entry(gem_request, &dev_priv->rings[BCS].request_list, list) { sbuf_printf(m, " %d @ %d\n", gem_request->seqno, (int) (jiffies - gem_request->emitted_jiffies)); } count++; } DRM_UNLOCK(dev); if (count == 0) sbuf_printf(m, "No requests\n"); return 0; } static void i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring) { if (ring->get_seqno) { sbuf_printf(m, "Current sequence (%s): %d\n", ring->name, ring->get_seqno(ring)); - sbuf_printf(m, "Waiter sequence (%s): %d\n", - ring->name, ring->waiting_seqno); - sbuf_printf(m, "IRQ sequence (%s): %d\n", - ring->name, ring->irq_seqno); } } static int i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data) { drm_i915_private_t *dev_priv = dev->dev_private; int i; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); for (i = 0; i < I915_NUM_RINGS; i++) i915_ring_seqno_info(m, &dev_priv->rings[i]); DRM_UNLOCK(dev); return (0); } static int i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data) { drm_i915_private_t *dev_priv = dev->dev_private; int i, pipe; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); - if (!HAS_PCH_SPLIT(dev)) { + if (IS_VALLEYVIEW(dev)) { + sbuf_printf(m, "Display IER:\t%08x\n", + I915_READ(VLV_IER)); + sbuf_printf(m, "Display IIR:\t%08x\n", + I915_READ(VLV_IIR)); + sbuf_printf(m, "Display IIR_RW:\t%08x\n", + I915_READ(VLV_IIR_RW)); + sbuf_printf(m, "Display IMR:\t%08x\n", + I915_READ(VLV_IMR)); + for_each_pipe(pipe) + sbuf_printf(m, "Pipe %c stat:\t%08x\n", + pipe_name(pipe), + I915_READ(PIPESTAT(pipe))); + + sbuf_printf(m, "Master IER:\t%08x\n", + I915_READ(VLV_MASTER_IER)); + + sbuf_printf(m, "Render IER:\t%08x\n", + I915_READ(GTIER)); + sbuf_printf(m, "Render IIR:\t%08x\n", + I915_READ(GTIIR)); + sbuf_printf(m, "Render IMR:\t%08x\n", 
+ I915_READ(GTIMR)); + + sbuf_printf(m, "PM IER:\t\t%08x\n", + I915_READ(GEN6_PMIER)); + sbuf_printf(m, "PM IIR:\t\t%08x\n", + I915_READ(GEN6_PMIIR)); + sbuf_printf(m, "PM IMR:\t\t%08x\n", + I915_READ(GEN6_PMIMR)); + + sbuf_printf(m, "Port hotplug:\t%08x\n", + I915_READ(PORT_HOTPLUG_EN)); + sbuf_printf(m, "DPFLIPSTAT:\t%08x\n", + I915_READ(VLV_DPFLIPSTAT)); + sbuf_printf(m, "DPINVGTT:\t%08x\n", + I915_READ(DPINVGTT)); + + } else if (!HAS_PCH_SPLIT(dev)) { sbuf_printf(m, "Interrupt enable: %08x\n", I915_READ(IER)); sbuf_printf(m, "Interrupt identity: %08x\n", I915_READ(IIR)); sbuf_printf(m, "Interrupt mask: %08x\n", I915_READ(IMR)); for_each_pipe(pipe) sbuf_printf(m, "Pipe %c stat: %08x\n", pipe_name(pipe), I915_READ(PIPESTAT(pipe))); } else { sbuf_printf(m, "North Display Interrupt enable: %08x\n", I915_READ(DEIER)); sbuf_printf(m, "North Display Interrupt identity: %08x\n", I915_READ(DEIIR)); sbuf_printf(m, "North Display Interrupt mask: %08x\n", I915_READ(DEIMR)); sbuf_printf(m, "South Display Interrupt enable: %08x\n", I915_READ(SDEIER)); sbuf_printf(m, "South Display Interrupt identity: %08x\n", I915_READ(SDEIIR)); sbuf_printf(m, "South Display Interrupt mask: %08x\n", I915_READ(SDEIMR)); sbuf_printf(m, "Graphics Interrupt enable: %08x\n", I915_READ(GTIER)); sbuf_printf(m, "Graphics Interrupt identity: %08x\n", I915_READ(GTIIR)); sbuf_printf(m, "Graphics Interrupt mask: %08x\n", I915_READ(GTIMR)); } sbuf_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); for (i = 0; i < I915_NUM_RINGS; i++) { if (IS_GEN6(dev) || IS_GEN7(dev)) { sbuf_printf(m, "Graphics Interrupt mask (%s): %08x\n", dev_priv->rings[i].name, I915_READ_IMR(&dev_priv->rings[i])); } i915_ring_seqno_info(m, &dev_priv->rings[i]); } DRM_UNLOCK(dev); return (0); } static int i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data) { drm_i915_private_t *dev_priv = dev->dev_private; int i; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; sbuf_printf(m, "Fenced object[%2d] = ", i); if (obj == NULL) sbuf_printf(m, "unused"); else describe_obj(m, obj); sbuf_printf(m, "\n"); } DRM_UNLOCK(dev); return (0); } static int i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; const volatile u32 *hws; int i; ring = &dev_priv->rings[(uintptr_t)data]; hws = (volatile u32 *)ring->status_page.page_addr; if (hws == NULL) return (0); for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4, hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); } return (0); } -static int -i915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; - - if (sx_xlock_sig(&dev->dev_struct_lock)) - return (EINTR); - ring = &dev_priv->rings[(uintptr_t)data]; - if (!ring->obj) { - sbuf_printf(m, "No ringbuffer setup\n"); - } else { - u8 *virt = ring->virtual_start; - uint32_t off; - - for (off = 0; off < ring->size; off += 4) { - uint32_t *ptr = (uint32_t *)(virt + off); - sbuf_printf(m, "%08x : %08x\n", off, *ptr); - } - } - DRM_UNLOCK(dev); - return (0); -} - -static int -i915_ringbuffer_info(struct drm_device *dev, struct sbuf *m, 
void *data) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; - - ring = &dev_priv->rings[(uintptr_t)data]; - if (ring->size == 0) - return (0); - - if (sx_xlock_sig(&dev->dev_struct_lock)) - return (EINTR); - - sbuf_printf(m, "Ring %s:\n", ring->name); - sbuf_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); - sbuf_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); - sbuf_printf(m, " Size : %08x\n", ring->size); - sbuf_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); - sbuf_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); - if (IS_GEN6(dev) || IS_GEN7(dev)) { - sbuf_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); - sbuf_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); - } - sbuf_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); - sbuf_printf(m, " Start : %08x\n", I915_READ_START(ring)); - - DRM_UNLOCK(dev); - - return (0); -} - static const char * ring_str(int ring) { switch (ring) { case RCS: return (" render"); case VCS: return (" bsd"); case BCS: return (" blt"); default: return (""); } } static const char * pin_flag(int pinned) { if (pinned > 0) return (" P"); else if (pinned < 0) return (" p"); else return (""); } static const char *tiling_flag(int tiling) { switch (tiling) { default: case I915_TILING_NONE: return ""; case I915_TILING_X: return " X"; case I915_TILING_Y: return " Y"; } } static const char *dirty_flag(int dirty) { return dirty ? " dirty" : ""; } static const char *purgeable_flag(int purgeable) { return purgeable ? " purgeable" : ""; } static void print_error_buffers(struct sbuf *m, const char *name, struct drm_i915_error_buffer *err, int count) { sbuf_printf(m, "%s [%d]:\n", name, count); while (count--) { sbuf_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", err->gtt_offset, err->size, err->read_domains, err->write_domain, err->seqno, pin_flag(err->pinned), tiling_flag(err->tiling), dirty_flag(err->dirty), purgeable_flag(err->purgeable), err->ring != -1 ? 
" " : "", ring_str(err->ring), cache_level_str(err->cache_level)); if (err->name) sbuf_printf(m, " (name: %d)", err->name); if (err->fence_reg != I915_FENCE_REG_NONE) sbuf_printf(m, " (fence: %d)", err->fence_reg); sbuf_printf(m, "\n"); err++; } } static void i915_ring_error_state(struct sbuf *m, struct drm_device *dev, struct drm_i915_error_state *error, unsigned ring) { + MPASS((ring < I915_NUM_RINGS)); /* shut up confused gcc */ sbuf_printf(m, "%s command stream:\n", ring_str(ring)); sbuf_printf(m, " HEAD: 0x%08x\n", error->head[ring]); sbuf_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); sbuf_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); sbuf_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); sbuf_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); sbuf_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { sbuf_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); sbuf_printf(m, " BBADDR: 0x%08jx\n", (uintmax_t)error->bbaddr); } if (INTEL_INFO(dev)->gen >= 4) sbuf_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); sbuf_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); + sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); if (INTEL_INFO(dev)->gen >= 6) { - sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); sbuf_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); sbuf_printf(m, " SYNC_0: 0x%08x\n", error->semaphore_mboxes[ring][0]); sbuf_printf(m, " SYNC_1: 0x%08x\n", error->semaphore_mboxes[ring][1]); } sbuf_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); + sbuf_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); sbuf_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); } -static int i915_error_state(struct drm_device *dev, struct sbuf *m, +static int +i915_error_state(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_error_state *error; + struct intel_ring_buffer *ring; int i, j, page, offset, elt; mtx_lock(&dev_priv->error_lock); - if (!dev_priv->first_error) { + error = dev_priv->first_error; + if (error != NULL) + refcount_acquire(&error->ref); + mtx_unlock(&dev_priv->error_lock); + if (error == NULL) { sbuf_printf(m, "no error state collected\n"); - goto out; + return (0); } error = dev_priv->first_error; sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec, (intmax_t)error->time.tv_usec); sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); sbuf_printf(m, "EIR: 0x%08x\n", error->eir); + sbuf_printf(m, "IER: 0x%08x\n", error->ier); sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); for (i = 0; i < dev_priv->num_fence_regs; i++) sbuf_printf(m, " fence[%d] = %08jx\n", i, (uintmax_t)error->fence[i]); if (INTEL_INFO(dev)->gen >= 6) { sbuf_printf(m, "ERROR: 0x%08x\n", error->error); sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } - i915_ring_error_state(m, dev, error, RCS); - if (HAS_BLT(dev)) - i915_ring_error_state(m, dev, error, BCS); - if (HAS_BSD(dev)) - i915_ring_error_state(m, dev, error, VCS); + for_each_ring(ring, dev_priv, i) + i915_ring_error_state(m, dev, error, i); if (error->active_bo) print_error_buffers(m, "Active", error->active_bo, error->active_bo_count); if (error->pinned_bo) print_error_buffers(m, "Pinned", error->pinned_bo, error->pinned_bo_count); for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) { struct drm_i915_error_object *obj; if ((obj = error->ring[i].batchbuffer)) { sbuf_printf(m, "%s --- 
gtt_offset = 0x%08x\n", dev_priv->rings[i].name, obj->gtt_offset); offset = 0; for (page = 0; page < obj->page_count; page++) { for (elt = 0; elt < PAGE_SIZE/4; elt++) { sbuf_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); offset += 4; } } } if (error->ring[i].num_requests) { sbuf_printf(m, "%s --- %d requests\n", dev_priv->rings[i].name, error->ring[i].num_requests); for (j = 0; j < error->ring[i].num_requests; j++) { sbuf_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", error->ring[i].requests[j].seqno, error->ring[i].requests[j].jiffies, error->ring[i].requests[j].tail); } } if ((obj = error->ring[i].ringbuffer)) { sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n", dev_priv->rings[i].name, obj->gtt_offset); offset = 0; for (page = 0; page < obj->page_count; page++) { for (elt = 0; elt < PAGE_SIZE/4; elt++) { sbuf_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); offset += 4; } } } } if (error->overlay) intel_overlay_print_error_state(m, error->overlay); if (error->display) intel_display_print_error_state(m, dev, error->display); -out: - mtx_unlock(&dev_priv->error_lock); + if (refcount_release(&error->ref)) + i915_error_state_free(error); return (0); } static int +i915_error_state_w(struct drm_device *dev, const char *str, void *unused) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + + DRM_DEBUG_DRIVER("Resetting error state\n"); + mtx_lock(&dev_priv->error_lock); + error = dev_priv->first_error; + dev_priv->first_error = NULL; + mtx_unlock(&dev_priv->error_lock); + if (error != NULL && refcount_release(&error->ref)) + i915_error_state_free(error); + return (0); +} + +static int i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; u16 crstanddelay; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); crstanddelay = I915_READ16(CRSTANDVID); DRM_UNLOCK(dev); sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); return 0; } static int i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; if (IS_GEN5(dev)) { u16 rgvswctl = I915_READ16(MEMSWCTL); u16 rgvstat = I915_READ16(MEMSTAT_ILK); sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> MEMSTAT_VID_SHIFT); sbuf_printf(m, "Current P-state: %d\n", (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); } else if (IS_GEN6(dev)) { u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); u32 rpstat; u32 rpupei, rpcurup, rpprevup; u32 rpdownei, rpcurdown, rpprevdown; int max_freq; /* RPSTAT1 is in the GT power well */ if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); gen6_gt_force_wake_get(dev_priv); rpstat = I915_READ(GEN6_RPSTAT1); rpupei = I915_READ(GEN6_RP_CUR_UP_EI); rpcurup = I915_READ(GEN6_RP_CUR_UP); rpprevup = I915_READ(GEN6_RP_PREV_UP); rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); gen6_gt_force_wake_put(dev_priv); DRM_UNLOCK(dev); sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat); sbuf_printf(m, "Render p-state ratio: %d\n", (gt_perf_status & 0xff00) >> 8); sbuf_printf(m, "Render p-state VID: %d\n", 
gt_perf_status & 0xff); sbuf_printf(m, "Render p-state limit: %d\n", rp_state_limits & 0xff); sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT) * 50); sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei & GEN6_CURICONT_MASK); sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup & GEN6_CURBSYTAVG_MASK); sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup & GEN6_CURBSYTAVG_MASK); sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & GEN6_CURIAVG_MASK); sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & GEN6_CURBSYTAVG_MASK); sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & GEN6_CURBSYTAVG_MASK); max_freq = (rp_state_cap & 0xff0000) >> 16; sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n", max_freq * 50); max_freq = (rp_state_cap & 0xff00) >> 8; sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n", max_freq * 50); max_freq = rp_state_cap & 0xff; sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", max_freq * 50); } else { sbuf_printf(m, "no P-state info available\n"); } return 0; } static int i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; u32 delayfreq; int i; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); for (i = 0; i < 16; i++) { delayfreq = I915_READ(PXVFREQ_BASE + i * 4); sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); } DRM_UNLOCK(dev); return (0); } static inline int MAP_TO_MV(int map) { return 1250 - (map * 25); } static int i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; u32 inttoext; int i; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); for (i = 1; i <= 32; i++) { inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); } DRM_UNLOCK(dev); return (0); } static int ironlake_drpc_info(struct drm_device *dev, struct sbuf *m) { drm_i915_private_t *dev_priv = dev->dev_private; u32 rgvmodectl; u32 rstdbyctl; u16 crstandvid; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); rgvmodectl = I915_READ(MEMMODECTL); rstdbyctl = I915_READ(RSTDBYCTL); crstandvid = I915_READ16(CRSTANDVID); DRM_UNLOCK(dev); sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? "yes" : "no"); sbuf_printf(m, "Boost freq: %d\n", (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> MEMMODE_BOOST_FREQ_SHIFT); sbuf_printf(m, "HW control enabled: %s\n", rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); sbuf_printf(m, "SW control enabled: %s\n", rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); sbuf_printf(m, "Gated voltage change: %s\n", rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); sbuf_printf(m, "Starting frequency: P%d\n", (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); sbuf_printf(m, "Max P-state: P%d\n", (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); sbuf_printf(m, "Render standby enabled: %s\n", (rstdbyctl & RCX_SW_EXIT) ? 
"no" : "yes"); sbuf_printf(m, "Current RS state: "); switch (rstdbyctl & RSX_STATUS_MASK) { case RSX_STATUS_ON: sbuf_printf(m, "on\n"); break; case RSX_STATUS_RC1: sbuf_printf(m, "RC1\n"); break; case RSX_STATUS_RC1E: sbuf_printf(m, "RC1E\n"); break; case RSX_STATUS_RS1: sbuf_printf(m, "RS1\n"); break; case RSX_STATUS_RS2: sbuf_printf(m, "RS2 (RC6)\n"); break; case RSX_STATUS_RS3: sbuf_printf(m, "RC3 (RC6+)\n"); break; default: sbuf_printf(m, "unknown\n"); break; } return 0; } static int gen6_drpc_info(struct drm_device *dev, struct sbuf *m) { drm_i915_private_t *dev_priv = dev->dev_private; u32 rpmodectl1, gt_core_status, rcctl1; unsigned forcewake_count; int count=0; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); mtx_lock(&dev_priv->gt_lock); forcewake_count = dev_priv->forcewake_count; mtx_unlock(&dev_priv->gt_lock); if (forcewake_count) { sbuf_printf(m, "RC information inaccurate because userspace " "holds a reference \n"); } else { /* NB: we cannot use forcewake, else we read the wrong values */ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) DRM_UDELAY(10); sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51)); } gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS); trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); rpmodectl1 = I915_READ(GEN6_RP_CONTROL); rcctl1 = I915_READ(GEN6_RC_CONTROL); DRM_UNLOCK(dev); sbuf_printf(m, "Video Turbo Mode: %s\n", yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); sbuf_printf(m, "HW control enabled: %s\n", yesno(rpmodectl1 & GEN6_RP_ENABLE)); sbuf_printf(m, "SW control enabled: %s\n", yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); sbuf_printf(m, "RC1e Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); sbuf_printf(m, "RC6 Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); sbuf_printf(m, "Deep RC6 Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); sbuf_printf(m, "Deepest RC6 Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); sbuf_printf(m, "Current RC state: "); switch (gt_core_status & GEN6_RCn_MASK) { case GEN6_RC0: if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) sbuf_printf(m, "Core Power Down\n"); else sbuf_printf(m, "on\n"); break; case GEN6_RC3: sbuf_printf(m, "RC3\n"); break; case GEN6_RC6: sbuf_printf(m, "RC6\n"); break; case GEN6_RC7: sbuf_printf(m, "RC7\n"); break; default: sbuf_printf(m, "Unknown\n"); break; } sbuf_printf(m, "Core Power Down: %s\n", yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); + + /* Not exactly sure what this is */ + sbuf_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6_LOCKED)); + sbuf_printf(m, "RC6 residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6)); + sbuf_printf(m, "RC6+ residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6p)); + sbuf_printf(m, "RC6++ residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6pp)); + return 0; } static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused) { if (IS_GEN6(dev) || IS_GEN7(dev)) return (gen6_drpc_info(dev, m)); else return (ironlake_drpc_info(dev, m)); } static int i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; if (!I915_HAS_FBC(dev)) { sbuf_printf(m, "FBC unsupported on this chipset"); return 0; } if (intel_fbc_enabled(dev)) { sbuf_printf(m, "FBC enabled"); } else { sbuf_printf(m, "FBC disabled: "); switch (dev_priv->no_fbc_reason) { case FBC_NO_OUTPUT: sbuf_printf(m, "no outputs"); break; case FBC_STOLEN_TOO_SMALL: 
sbuf_printf(m, "not enough stolen memory"); break; case FBC_UNSUPPORTED_MODE: sbuf_printf(m, "mode not supported"); break; case FBC_MODE_TOO_LARGE: sbuf_printf(m, "mode too large"); break; case FBC_BAD_PLANE: sbuf_printf(m, "FBC unsupported on plane"); break; case FBC_NOT_TILED: sbuf_printf(m, "scanout buffer not tiled"); break; case FBC_MULTIPLE_PIPES: sbuf_printf(m, "multiple pipes are enabled"); break; default: sbuf_printf(m, "unknown reason"); } } return 0; } static int i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; bool sr_enabled = false; if (HAS_PCH_SPLIT(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev)) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; else if (IS_PINEVIEW(dev)) sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; sbuf_printf(m, "self-refresh: %s", sr_enabled ? "enabled" : "disabled"); return (0); } static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; int gpu_freq, ia_freq; if (!(IS_GEN6(dev) || IS_GEN7(dev))) { sbuf_printf(m, "unsupported on this chipset"); return (0); } if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; gpu_freq++) { I915_WRITE(GEN6_PCODE_DATA, gpu_freq); I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | GEN6_PCODE_READ_MIN_FREQ_TABLE); if (_intel_wait_for(dev, (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 10, 1, "915frq")) { DRM_ERROR("pcode read of freq table timed out\n"); continue; } ia_freq = I915_READ(GEN6_PCODE_DATA); sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); } DRM_UNLOCK(dev); return (0); } static int i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long temp, chipset, gfx; if (!IS_GEN5(dev)) { sbuf_printf(m, "Not supported\n"); return (0); } if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); temp = i915_mch_val(dev_priv); chipset = i915_chipset_val(dev_priv); gfx = i915_gfx_val(dev_priv); DRM_UNLOCK(dev); sbuf_printf(m, "GMCH temp: %ld\n", temp); sbuf_printf(m, "Chipset power: %ld\n", chipset); sbuf_printf(m, "GFX power: %ld\n", gfx); sbuf_printf(m, "Total power: %ld\n", chipset + gfx); return (0); } static int i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); DRM_UNLOCK(dev); return (0); } #if 0 static int i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); if (opregion->header) seq_write(m, opregion->header, OPREGION_SIZE); DRM_UNLOCK(dev); return 0; } #endif static int i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_fbdev *ifbdev; struct intel_framebuffer *fb; if (sx_xlock_sig(&dev->dev_struct_lock)) return (EINTR); ifbdev = dev_priv->fbdev; if (ifbdev == NULL) { DRM_UNLOCK(dev); return (0); } fb = 
to_intel_framebuffer(ifbdev->helper.fb); sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", fb->base.width, fb->base.height, fb->base.depth, fb->base.bits_per_pixel); describe_obj(m, fb->obj); sbuf_printf(m, "\n"); list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { if (&fb->base == ifbdev->helper.fb) continue; sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", fb->base.width, fb->base.height, fb->base.depth, fb->base.bits_per_pixel); describe_obj(m, fb->obj); sbuf_printf(m, "\n"); } DRM_UNLOCK(dev); return (0); } static int i915_context_status(struct drm_device *dev, struct sbuf *m, void *data) { drm_i915_private_t *dev_priv; int ret; if ((dev->driver->driver_features & DRIVER_MODESET) == 0) return (0); dev_priv = dev->dev_private; ret = sx_xlock_sig(&dev->mode_config.mutex); if (ret != 0) return (EINTR); if (dev_priv->pwrctx != NULL) { sbuf_printf(m, "power context "); describe_obj(m, dev_priv->pwrctx); sbuf_printf(m, "\n"); } if (dev_priv->renderctx != NULL) { sbuf_printf(m, "render context "); describe_obj(m, dev_priv->renderctx); sbuf_printf(m, "\n"); } sx_xunlock(&dev->mode_config.mutex); return (0); } static int i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m, void *data) { struct drm_i915_private *dev_priv; unsigned forcewake_count; dev_priv = dev->dev_private; mtx_lock(&dev_priv->gt_lock); forcewake_count = dev_priv->forcewake_count; mtx_unlock(&dev_priv->gt_lock); sbuf_printf(m, "forcewake count = %u\n", forcewake_count); return (0); } static const char * swizzle_string(unsigned swizzle) { switch(swizzle) { case I915_BIT_6_SWIZZLE_NONE: return "none"; case I915_BIT_6_SWIZZLE_9: return "bit9"; case I915_BIT_6_SWIZZLE_9_10: return "bit9/bit10"; case I915_BIT_6_SWIZZLE_9_11: return "bit9/bit11"; case I915_BIT_6_SWIZZLE_9_10_11: return "bit9/bit10/bit11"; case I915_BIT_6_SWIZZLE_9_17: return "bit9/bit17"; case I915_BIT_6_SWIZZLE_9_10_17: return "bit9/bit10/bit17"; case I915_BIT_6_SWIZZLE_UNKNOWN: return "unknown"; } return "bug"; } static int i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data) { struct drm_i915_private *dev_priv; int ret; dev_priv = dev->dev_private; ret = sx_xlock_sig(&dev->dev_struct_lock); if (ret != 0) return (EINTR); sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_x)); sbuf_printf(m, "bit6 swizzle for Y-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_y)); if (IS_GEN3(dev) || IS_GEN4(dev)) { sbuf_printf(m, "DDC = 0x%08x\n", I915_READ(DCC)); sbuf_printf(m, "C0DRB3 = 0x%04x\n", I915_READ16(C0DRB3)); sbuf_printf(m, "C1DRB3 = 0x%04x\n", I915_READ16(C1DRB3)); } else if (IS_GEN6(dev) || IS_GEN7(dev)) { sbuf_printf(m, "MAD_DIMM_C0 = 0x%08x\n", I915_READ(MAD_DIMM_C0)); sbuf_printf(m, "MAD_DIMM_C1 = 0x%08x\n", I915_READ(MAD_DIMM_C1)); sbuf_printf(m, "MAD_DIMM_C2 = 0x%08x\n", I915_READ(MAD_DIMM_C2)); sbuf_printf(m, "TILECTL = 0x%08x\n", I915_READ(TILECTL)); sbuf_printf(m, "ARB_MODE = 0x%08x\n", I915_READ(ARB_MODE)); sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n", I915_READ(DISP_ARB_CTL)); } DRM_UNLOCK(dev); return (0); } static int i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data) { struct drm_i915_private *dev_priv; struct intel_ring_buffer *ring; int i, ret; dev_priv = dev->dev_private; ret = sx_xlock_sig(&dev->dev_struct_lock); if (ret != 0) return (EINTR); if (INTEL_INFO(dev)->gen == 6) sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); for (i = 0; i < I915_NUM_RINGS; i++) { ring = &dev_priv->rings[i]; sbuf_printf(m, 
"%s\n", ring->name); if (INTEL_INFO(dev)->gen == 7) sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); sbuf_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); sbuf_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); sbuf_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); } if (dev_priv->mm.aliasing_ppgtt) { struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; sbuf_printf(m, "aliasing PPGTT:\n"); sbuf_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); } sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); DRM_UNLOCK(dev); return (0); } +static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data) +{ + struct drm_i915_private *dev_priv; + int ret; + + if (!IS_VALLEYVIEW(dev)) { + sbuf_printf(m, "unsupported\n"); + return 0; + } + + dev_priv = dev->dev_private; + + ret = sx_xlock_sig(&dev->mode_config.mutex); + if (ret != 0) + return (EINTR); + + sbuf_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); + + sbuf_printf(m, "DPIO_DIV_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_DIV_A)); + sbuf_printf(m, "DPIO_DIV_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_DIV_B)); + + sbuf_printf(m, "DPIO_REFSFR_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); + sbuf_printf(m, "DPIO_REFSFR_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); + + sbuf_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); + sbuf_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); + + sbuf_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); + sbuf_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); + + sbuf_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", + intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); + + sx_xunlock(&dev->mode_config.mutex); + + return 0; +} + static int i915_debug_set_wedged(SYSCTL_HANDLER_ARGS) { struct drm_device *dev; drm_i915_private_t *dev_priv; int error, wedged; dev = arg1; dev_priv = dev->dev_private; if (dev_priv == NULL) return (EBUSY); wedged = dev_priv->mm.wedged; error = sysctl_handle_int(oidp, &wedged, 0, req); if (error || !req->newptr) return (error); DRM_INFO("Manually setting wedged to %d\n", wedged); i915_handle_error(dev, wedged); return (error); } static int i915_max_freq(SYSCTL_HANDLER_ARGS) { struct drm_device *dev; drm_i915_private_t *dev_priv; int error, max_freq; dev = arg1; dev_priv = dev->dev_private; if (dev_priv == NULL) return (EBUSY); max_freq = dev_priv->max_delay * 50; error = sysctl_handle_int(oidp, &max_freq, 0, req); if (error || !req->newptr) return (error); DRM_DEBUG("Manually setting max freq to %d\n", max_freq); /* * Turbo will still be enabled, but won't go above the set value. 
*/ dev_priv->max_delay = max_freq / 50; gen6_set_rps(dev, max_freq / 50); return (error); } static int i915_cache_sharing(SYSCTL_HANDLER_ARGS) { struct drm_device *dev; drm_i915_private_t *dev_priv; int error, snpcr, cache_sharing; dev = arg1; dev_priv = dev->dev_private; if (dev_priv == NULL) return (EBUSY); DRM_LOCK(dev); snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); DRM_UNLOCK(dev); cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; error = sysctl_handle_int(oidp, &cache_sharing, 0, req); if (error || !req->newptr) return (error); if (cache_sharing < 0 || cache_sharing > 3) return (EINVAL); DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing); DRM_LOCK(dev); /* Update the cache sharing policy here as well */ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); snpcr &= ~GEN6_MBC_SNPCR_MASK; snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT); I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); DRM_UNLOCK(dev); return (0); } +static int +i915_stop_rings(SYSCTL_HANDLER_ARGS) +{ + struct drm_device *dev; + drm_i915_private_t *dev_priv; + int error, val; + + dev = arg1; + dev_priv = dev->dev_private; + if (dev_priv == NULL) + return (EBUSY); + DRM_LOCK(dev); + val = dev_priv->stop_rings; + DRM_UNLOCK(dev); + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return (error); + DRM_DEBUG("Stopping rings 0x%08x\n", val); + + DRM_LOCK(dev); + dev_priv->stop_rings = val; + DRM_UNLOCK(dev); + return (0); +} + static struct i915_info_sysctl_list { const char *name; int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data); + int (*ptr_w)(struct drm_device *dev, const char *str, void *data); int flags; void *data; } i915_info_sysctl_list[] = { - {"i915_capabilities", i915_capabilities, 0}, - {"i915_gem_objects", i915_gem_object_info, 0}, - {"i915_gem_gtt", i915_gem_gtt_info, 0}, - {"i915_gem_active", i915_gem_object_list_info, 0, (void *)ACTIVE_LIST}, - {"i915_gem_flushing", i915_gem_object_list_info, 0, + {"i915_capabilities", i915_capabilities, NULL, 0}, + {"i915_gem_objects", i915_gem_object_info, NULL, 0}, + {"i915_gem_gtt", i915_gem_gtt_info, NULL, 0}, + {"i915_gem_pinned", i915_gem_gtt_info, NULL, 0, (void *)PINNED_LIST}, + {"i915_gem_active", i915_gem_object_list_info, NULL, 0, + (void *)ACTIVE_LIST}, + {"i915_gem_flushing", i915_gem_object_list_info, NULL, 0, (void *)FLUSHING_LIST}, - {"i915_gem_inactive", i915_gem_object_list_info, 0, + {"i915_gem_inactive", i915_gem_object_list_info, NULL, 0, (void *)INACTIVE_LIST}, - {"i915_gem_pinned", i915_gem_object_list_info, 0, - (void *)PINNED_LIST}, - {"i915_gem_deferred_free", i915_gem_object_list_info, 0, - (void *)DEFERRED_FREE_LIST}, - {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, - {"i915_gem_request", i915_gem_request_info, 0}, - {"i915_gem_seqno", i915_gem_seqno_info, 0}, - {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, - {"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, - {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, - {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, - {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, - {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, - {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, - {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, - {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, - {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, - {"i915_error_state", i915_error_state, 0}, - {"i915_rstdby_delays", 
i915_rstdby_delays, 0}, - {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, - {"i915_delayfreq_table", i915_delayfreq_table, 0}, - {"i915_inttoext_table", i915_inttoext_table, 0}, - {"i915_drpc_info", i915_drpc_info, 0}, - {"i915_emon_status", i915_emon_status, 0}, - {"i915_ring_freq_table", i915_ring_freq_table, 0}, - {"i915_gfxec", i915_gfxec, 0}, - {"i915_fbc_status", i915_fbc_status, 0}, - {"i915_sr_status", i915_sr_status, 0}, + {"i915_gem_pageflip", i915_gem_pageflip_info, NULL, 0}, + {"i915_gem_request", i915_gem_request_info, NULL, 0}, + {"i915_gem_seqno", i915_gem_seqno_info, NULL, 0}, + {"i915_gem_fence_regs", i915_gem_fence_regs_info, NULL, 0}, + {"i915_gem_interrupt", i915_interrupt_info, NULL, 0}, + {"i915_gem_hws", i915_hws_info, NULL, 0, (void *)RCS}, + {"i915_gem_hws_blt", i915_hws_info, NULL, 0, (void *)BCS}, + {"i915_gem_hws_bsd", i915_hws_info, NULL, 0, (void *)VCS}, + {"i915_error_state", i915_error_state, i915_error_state_w, 0}, + {"i915_rstdby_delays", i915_rstdby_delays, NULL, 0}, + {"i915_cur_delayinfo", i915_cur_delayinfo, NULL, 0}, + {"i915_delayfreq_table", i915_delayfreq_table, NULL, 0}, + {"i915_inttoext_table", i915_inttoext_table, NULL, 0}, + {"i915_drpc_info", i915_drpc_info, NULL, 0}, + {"i915_emon_status", i915_emon_status, NULL, 0}, + {"i915_ring_freq_table", i915_ring_freq_table, NULL, 0}, + {"i915_gfxec", i915_gfxec, NULL, 0}, + {"i915_fbc_status", i915_fbc_status, NULL, 0}, + {"i915_sr_status", i915_sr_status, NULL, 0}, #if 0 - {"i915_opregion", i915_opregion, 0}, + {"i915_opregion", i915_opregion, NULL, 0}, #endif - {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, - {"i915_context_status", i915_context_status, 0}, - {"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 0}, - {"i915_swizzle_info", i915_swizzle_info, 0}, - {"i915_ppgtt_info", i915_ppgtt_info, 0}, + {"i915_gem_framebuffer", i915_gem_framebuffer_info, NULL, 0}, + {"i915_context_status", i915_context_status, NULL, 0}, + {"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, + NULL, 0}, + {"i915_swizzle_info", i915_swizzle_info, NULL, 0}, + {"i915_ppgtt_info", i915_ppgtt_info, NULL, 0}, + {"i915_dpio", i915_dpio_info, NULL, 0}, }; struct i915_info_sysctl_thunk { struct drm_device *dev; int idx; void *arg; }; static int i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct sbuf m; struct i915_info_sysctl_thunk *thunk; struct drm_device *dev; drm_i915_private_t *dev_priv; + char *p; int error; thunk = arg1; dev = thunk->dev; dev_priv = dev->dev_private; if (dev_priv == NULL) return (EBUSY); error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&m, NULL, 128, req); error = i915_info_sysctl_list[thunk->idx].ptr(dev, &m, thunk->arg); if (error == 0) error = sbuf_finish(&m); sbuf_delete(&m); + if (error != 0 || req->newptr == NULL) + return (error); + if (req->newlen > 2048) + return (E2BIG); + p = malloc(req->newlen + 1, M_TEMP, M_WAITOK); + error = SYSCTL_IN(req, p, req->newlen); + if (error != 0) + goto out; + p[req->newlen] = '\0'; + error = i915_info_sysctl_list[thunk->idx].ptr_w(dev, p, + thunk->arg); +out: + free(p, M_TEMP); return (error); } extern int i915_gem_sync_exec_requests; extern int i915_fix_mi_batchbuffer_end; extern int i915_intr_pf; extern long i915_gem_wired_pages_cnt; int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top) { struct sysctl_oid *oid, *info; struct i915_info_sysctl_thunk *thunks; int i, error; thunks = malloc(sizeof(*thunks) * 
DRM_ARRAY_SIZE(i915_info_sysctl_list), DRM_MEM_DRIVER, M_WAITOK | M_ZERO); for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) { thunks[i].dev = dev; thunks[i].idx = i; thunks[i].arg = i915_info_sysctl_list[i].data; } dev->sysctl_private = thunks; info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info", CTLFLAG_RW, NULL, NULL); if (info == NULL) return (ENOMEM); for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) { oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO, - i915_info_sysctl_list[i].name, CTLTYPE_STRING | CTLFLAG_RD, + i915_info_sysctl_list[i].name, CTLTYPE_STRING | + (i915_info_sysctl_list[i].ptr_w != NULL ? CTLFLAG_RW : + CTLFLAG_RD), &thunks[i], 0, i915_info_sysctl_handler, "A", NULL); if (oid == NULL) return (ENOMEM); } oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO, "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt, NULL); oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_debug_set_wedged, "I", NULL); if (oid == NULL) return (ENOMEM); oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq, "I", NULL); if (oid == NULL) return (ENOMEM); oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_cache_sharing, "I", NULL); + if (oid == NULL) + return (ENOMEM); + oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, + "stop_rings", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, + 0, i915_stop_rings, "I", NULL); if (oid == NULL) return (ENOMEM); oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec", CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL); if (oid == NULL) return (ENOMEM); oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi", CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL); if (oid == NULL) return (ENOMEM); oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf", CTLFLAG_RW, &i915_intr_pf, 0, NULL); if (oid == NULL) return (ENOMEM); error = drm_add_busid_modesetting(dev, ctx, top); if (error != 0) return (error); return (0); } void i915_sysctl_cleanup(struct drm_device *dev) { free(dev->sysctl_private, DRM_MEM_DRIVER); } Index: head/sys/dev/drm2/i915/i915_dma.c =================================================================== --- head/sys/dev/drm2/i915/i915_dma.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_dma.c (revision 277487) @@ -1,2090 +1,1708 @@ /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- */ /*- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
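The write path grafted onto i915_info_sysctl_handler() above (used by the new i915_error_state_w reset hook) reduces to: bound the incoming length, copy the user string in with SYSCTL_IN(), NUL-terminate it, and dispatch to the entry's ptr_w callback. A sketch of just that path, with handle_write() standing in for the callback pointer:

/*
 * Sketch of the sysctl write path added above; handle_write() is a
 * hypothetical stand-in for the per-entry ptr_w callback.
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

static int
example_sysctl_write(struct sysctl_req *req,
    int (*handle_write)(const char *str))
{
	char *p;
	int error;

	if (req->newptr == NULL)
		return (0);		/* read-only access, nothing to do */
	if (req->newlen > 2048)
		return (E2BIG);		/* same bound as the handler above */
	p = malloc(req->newlen + 1, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, p, req->newlen);
	if (error == 0) {
		p[req->newlen] = '\0';
		error = handle_write(p);
	}
	free(p, M_TEMP);
	return (error);
}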
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include -static struct drm_i915_private *i915_mch_dev; -/* - * Lock protecting IPS related data structures - * - i915_mch_dev - * - dev_priv->max_delay - * - dev_priv->min_delay - * - dev_priv->fmax - * - dev_priv->gpu_busy - */ -static struct mtx mchdev_lock; -MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF); +#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS]) -static void i915_pineview_get_mem_freq(struct drm_device *dev); -static void i915_ironlake_get_mem_freq(struct drm_device *dev); +#define BEGIN_LP_RING(n) \ + intel_ring_begin(LP_RING(dev_priv), (n)) + +#define OUT_RING(x) \ + intel_ring_emit(LP_RING(dev_priv), x) + +#define ADVANCE_LP_RING() \ + intel_ring_advance(LP_RING(dev_priv)) + +#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ + if (LP_RING(dev->dev_private)->obj == NULL) \ + LOCK_TEST_WITH_RETURN(dev, file); \ +} while (0) + +static inline u32 +intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg) +{ + if (I915_NEED_GFX_HWS(dev_priv->dev)) + return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg]; + else + return intel_read_status_page(LP_RING(dev_priv), reg); +} + +#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg) +#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) +#define I915_BREADCRUMB_INDEX 0x21 + static int i915_driver_unload_int(struct drm_device *dev, bool locked); +void i915_update_dri1_breadcrumb(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; +#if 0 + struct drm_i915_master_private *master_priv; + + if (dev->primary->master) { + master_priv = dev->primary->master->driver_priv; + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_dispatch = + READ_BREADCRUMB(dev_priv); + } +#else + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->last_dispatch = + READ_BREADCRUMB(dev_priv); +#endif +} + static void i915_write_hws_pga(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 addr; addr = dev_priv->status_page_dmah->busaddr; if (INTEL_INFO(dev)->gen >= 4) addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; I915_WRITE(HWS_PGA, addr); } /** * Sets up the hardware status page for devices that need a physical address * in the register. */ static int i915_init_phys_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); /* * Program Hardware Status Page * XXXKIB Keep 4GB limit for allocation for now. This method * of allocation is used on <= 965 hardware, that has several * erratas regarding the use of physical memory > 4 GB. 
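The gen4+ quirk in i915_write_hws_pga above is worth spelling out: HWS_PGA is a 32-bit register, so bits 35:32 of the status page's bus address are folded into register bits 7:4 (the page-aligned low bits are otherwise unused). A worked example with a made-up 36-bit address:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t busaddr = 0x3abcd0000ULL;	/* hypothetical page-aligned bus address */
	uint32_t hws_pga = (uint32_t)busaddr;	/* low 32 bits */

	/* Fold address bits 35:32 into register bits 7:4, as the driver does on gen4+. */
	hws_pga |= (uint32_t)((busaddr >> 28) & 0xf0);
	printf("HWS_PGA = 0x%08x\n", hws_pga);	/* prints 0xabcd0030 */
	return (0);
}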
*/ DRM_UNLOCK(dev); dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); DRM_LOCK(dev); if (!dev_priv->status_page_dmah) { DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } ring->status_page.page_addr = dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; memset(dev_priv->hw_status_page, 0, PAGE_SIZE); i915_write_hws_pga(dev); DRM_DEBUG("Enabled hardware status page, phys %jx\n", (uintmax_t)dev_priv->dma_status_page); return 0; } /** * Frees the hardware status page, whether it's a physical address or a virtual * address set up by the X Server. */ static void i915_free_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); if (dev_priv->status_page_dmah) { drm_pci_free(dev, dev_priv->status_page_dmah); dev_priv->status_page_dmah = NULL; } if (dev_priv->status_gfx_addr) { dev_priv->status_gfx_addr = 0; ring->status_page.gfx_addr = 0; - drm_core_ioremapfree(&dev_priv->hws_map, dev); + pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr, + PAGE_SIZE); } /* Need to rewrite hardware status page */ I915_WRITE(HWS_PGA, 0x1ffff000); } void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); /* * We should never lose context on the ring with modesetting * as we don't expose it to userspace */ if (drm_core_check_feature(dev, DRIVER_MODESET)) return; ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; #if 1 KIB_NOTYET(); #else if (!dev->primary->master) return; #endif if (ring->head == ring->tail && dev_priv->sarea_priv) dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } static int i915_dma_cleanup(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; int i; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. */ if (dev->irq_enabled) drm_irq_uninstall(dev); for (i = 0; i < I915_NUM_RINGS; i++) intel_cleanup_ring_buffer(&dev_priv->rings[i]); /* Clear the HWS virtual address at teardown */ if (I915_NEED_GFX_HWS(dev)) i915_free_hws(dev); return 0; } static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("can not find sarea!\n"); i915_dma_cleanup(dev); return -EINVAL; } dev_priv->sarea_priv = (drm_i915_sarea_t *) ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset); if (init->ring_size != 0) { if (LP_RING(dev_priv)->obj != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); return -EINVAL; } ret = intel_render_ring_init_dri(dev, init->ring_start, init->ring_size); if (ret) { i915_dma_cleanup(dev); return ret; } } dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; dev_priv->front_offset = init->front_offset; dev_priv->current_page = 0; dev_priv->sarea_priv->pf_current_page = 0; /* Allow hardware batchbuffers unless told otherwise. 
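The free-space computation in i915_kernel_lost_context above is ordinary circular-ring arithmetic, with an 8-byte guard so the tail can never fully catch up with the head (head == tail is reserved for "empty"). A host-side sketch with hypothetical offsets and ring size:

#include <stdio.h>

/*
 * Free space in a circular ring: distance from tail back around to head,
 * minus an 8-byte guard so a completely full ring is never confused with
 * an empty one.
 */
static int
ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return (space);
}

int
main(void)
{
	/* hypothetical 64 KiB ring */
	printf("%d\n", ring_space(0x100, 0x100, 65536));	/* empty: 65528 */
	printf("%d\n", ring_space(0x2000, 0x1000, 65536));	/* 4088 */
	return (0);
}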
*/ - dev_priv->allow_batchbuffer = 1; + dev_priv->dri1.allow_batchbuffer = 1; return 0; } static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); DRM_DEBUG("\n"); - if (ring->map.handle == NULL) { + if (ring->virtual_start == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); return -ENOMEM; } /* Program Hardware Status Page */ if (!ring->status_page.page_addr) { DRM_ERROR("Can not find hardware status page\n"); return -EINVAL; } DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr); if (ring->status_page.gfx_addr != 0) intel_ring_setup_status_page(ring); else i915_write_hws_pga(dev); DRM_DEBUG("Enabled hardware status page\n"); return 0; } static int i915_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_init_t *init = data; int retcode = 0; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + switch (init->func) { case I915_INIT_DMA: retcode = i915_initialize(dev, init); break; case I915_CLEANUP_DMA: retcode = i915_dma_cleanup(dev); break; case I915_RESUME_DMA: retcode = i915_dma_resume(dev); break; default: retcode = -EINVAL; break; } return retcode; } /* Implement basically the same security restrictions as hardware does * for MI_BATCH_NON_SECURE. These can be made stricter at any time. * * Most of the calculations below involve calculating the size of a * particular instruction. It's important to get the size right as * that tells us where the next instruction to check is. Any illegal * instruction detected will be given a size of zero, which is a * signal to abort the rest of the buffer. */ static int do_validate_cmd(int cmd) { switch (((cmd >> 29) & 0x7)) { case 0x0: switch ((cmd >> 23) & 0x3f) { case 0x0: return 1; /* MI_NOOP */ case 0x4: return 1; /* MI_FLUSH */ default: return 0; /* disallow everything else */ } break; case 0x1: return 0; /* reserved */ case 0x2: return (cmd & 0xff) + 2; /* 2d commands */ case 0x3: if (((cmd >> 24) & 0x1f) <= 0x18) return 1; switch ((cmd >> 24) & 0x1f) { case 0x1c: return 1; case 0x1d: switch ((cmd >> 16) & 0xff) { case 0x3: return (cmd & 0x1f) + 2; case 0x4: return (cmd & 0xf) + 2; default: return (cmd & 0xffff) + 2; } case 0x1e: if (cmd & (1 << 23)) return (cmd & 0xffff) + 1; else return 1; case 0x1f: if ((cmd & (1 << 23)) == 0) /* inline vertices */ return (cmd & 0x1ffff) + 2; else if (cmd & (1 << 17)) /* indirect random */ if ((cmd & 0xffff) == 0) return 0; /* unknown length, too hard */ else return (((cmd & 0xffff) + 1) / 2) + 1; else return 2; /* indirect sequential */ default: return 0; } default: return 0; } return 0; } static int validate_cmd(int cmd) { int ret = do_validate_cmd(cmd); /* printk("validate_cmd( %x ): %d\n", cmd, ret); */ return ret; } static int i915_emit_cmds(struct drm_device *dev, int __user *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) return -EINVAL; BEGIN_LP_RING((dwords+1)&~1); for (i = 0; i < dwords;) { int cmd, sz; if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) return -EINVAL; if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) return -EINVAL; OUT_RING(cmd); while (++i, --sz) { if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) { return -EINVAL; } OUT_RING(cmd); } } if (dwords & 1) OUT_RING(0); ADVANCE_LP_RING(); return 0; } int i915_emit_box(struct drm_device * dev, struct 
drm_clip_rect *boxes, int i, int DR1, int DR4) { struct drm_clip_rect box; if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { return -EFAULT; } return (i915_emit_box_p(dev, &box, DR1, DR4)); } int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box, int DR1, int DR4) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 || box->x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", box->x1, box->y1, box->x2, box->y2); return -EINVAL; } if (INTEL_INFO(dev)->gen >= 4) { ret = BEGIN_LP_RING(4); if (ret != 0) return (ret); OUT_RING(GFX_OP_DRAWRECT_INFO_I965); OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); OUT_RING(DR4); } else { ret = BEGIN_LP_RING(6); if (ret != 0) return (ret); OUT_RING(GFX_OP_DRAWRECT_INFO); OUT_RING(DR1); OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); OUT_RING(DR4); OUT_RING(0); } ADVANCE_LP_RING(); return 0; } /* XXX: Emitting the counter should really be moved to part of the IRQ * emit. For now, do it in both places: */ static void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (++dev_priv->counter > 0x7FFFFFFFUL) dev_priv->counter = 0; if (dev_priv->sarea_priv) dev_priv->sarea_priv->last_enqueue = dev_priv->counter; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } } static int i915_dispatch_cmdbuffer(struct drm_device * dev, drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf) { int nbox = cmd->num_cliprects; int i = 0, count, ret; if (cmd->sz & 0x3) { DRM_ERROR("alignment\n"); return -EINVAL; } i915_kernel_lost_context(dev); count = nbox ? nbox : 1; for (i = 0; i < count; i++) { if (i < nbox) { ret = i915_emit_box_p(dev, &cmd->cliprects[i], cmd->DR1, cmd->DR4); if (ret) return ret; } ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); if (ret) return ret; } i915_emit_breadcrumb(dev); return 0; } static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects) { drm_i915_private_t *dev_priv = dev->dev_private; int nbox = batch->num_cliprects; int i, count, ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + if ((batch->start | batch->used) & 0x7) { DRM_ERROR("alignment\n"); return -EINVAL; } i915_kernel_lost_context(dev); count = nbox ? 
nbox : 1; for (i = 0; i < count; i++) { if (i < nbox) { int ret = i915_emit_box_p(dev, &cliprects[i], batch->DR1, batch->DR4); if (ret) return ret; } if (!IS_I830(dev) && !IS_845G(dev)) { ret = BEGIN_LP_RING(2); if (ret != 0) return (ret); if (INTEL_INFO(dev)->gen >= 4) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); OUT_RING(batch->start); } else { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); OUT_RING(batch->start | MI_BATCH_NON_SECURE); } } else { ret = BEGIN_LP_RING(4); if (ret != 0) return (ret); OUT_RING(MI_BATCH_BUFFER); OUT_RING(batch->start | MI_BATCH_NON_SECURE); OUT_RING(batch->start + batch->used - 4); OUT_RING(0); } ADVANCE_LP_RING(); } i915_emit_breadcrumb(dev); return 0; } static int i915_dispatch_flip(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; if (!dev_priv->sarea_priv) return -EINVAL; DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", __func__, dev_priv->current_page, dev_priv->sarea_priv->pf_current_page); i915_kernel_lost_context(dev); ret = BEGIN_LP_RING(10); if (ret) return ret; OUT_RING(MI_FLUSH | MI_READ_FLUSH); OUT_RING(0); OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); OUT_RING(0); if (dev_priv->current_page == 0) { OUT_RING(dev_priv->back_offset); dev_priv->current_page = 1; } else { OUT_RING(dev_priv->front_offset); dev_priv->current_page = 0; } OUT_RING(0); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); OUT_RING(0); ADVANCE_LP_RING(); if (++dev_priv->counter > 0x7FFFFFFFUL) dev_priv->counter = 0; if (dev_priv->sarea_priv) dev_priv->sarea_priv->last_enqueue = dev_priv->counter; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; return 0; } static int i915_quiescent(struct drm_device *dev) { struct intel_ring_buffer *ring = LP_RING(dev->dev_private); i915_kernel_lost_context(dev); return (intel_wait_ring_idle(ring)); } static int i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_LOCK(dev); ret = i915_quiescent(dev); DRM_UNLOCK(dev); return (ret); } int i915_batchbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_sarea_t *sarea_priv; drm_i915_batchbuffer_t *batch = data; struct drm_clip_rect *cliprects; size_t cliplen; int ret; - if (!dev_priv->allow_batchbuffer) { + if (!dev_priv->dri1.allow_batchbuffer) { DRM_ERROR("Batchbuffer ioctl disabled\n"); return -EINVAL; } DRM_UNLOCK(dev); DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", batch->start, batch->used, batch->num_cliprects); cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect); if (batch->num_cliprects < 0) return -EFAULT; if (batch->num_cliprects != 0) { cliprects = malloc(batch->num_cliprects * sizeof(struct drm_clip_rect), DRM_MEM_DMA, M_WAITOK | M_ZERO); ret = -copyin(batch->cliprects, cliprects, batch->num_cliprects * sizeof(struct drm_clip_rect)); if (ret != 0) { DRM_LOCK(dev); goto fail_free; } } else cliprects = NULL; DRM_LOCK(dev); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); ret = i915_dispatch_batchbuffer(dev, batch, cliprects); sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv; if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 
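The breadcrumb scheme used by i915_emit_breadcrumb and READ_BREADCRUMB above reduces to: bump a driver counter, have the ring store it into a fixed dword of the shared status page after each batch, and let anyone interested compare that slot against the sequence number they were handed. A self-contained miniature of the idea, with plain memory standing in for the hardware status page and the MI_STORE_DWORD_INDEX write:

#include <stdint.h>
#include <stdio.h>

#define BREADCRUMB_INDEX 0x21		/* dword slot, as in the driver */

static uint32_t status_page[1024];	/* stand-in for the hw status page */
static uint32_t counter;

static uint32_t
emit_breadcrumb(void)
{
	if (++counter > 0x7FFFFFFFUL)	/* same wrap rule as i915_emit_breadcrumb */
		counter = 0;
	status_page[BREADCRUMB_INDEX] = counter;	/* MI_STORE_DWORD_INDEX analogue */
	return (counter);
}

static int
breadcrumb_passed(uint32_t seq)
{
	return (status_page[BREADCRUMB_INDEX] >= seq);
}

int
main(void)
{
	uint32_t seq = emit_breadcrumb();

	printf("seq %u passed: %d\n", seq, breadcrumb_passed(seq));
	return (0);
}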
fail_free: free(cliprects, DRM_MEM_DMA); return ret; } int i915_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_sarea_t *sarea_priv; drm_i915_cmdbuffer_t *cmdbuf = data; struct drm_clip_rect *cliprects = NULL; void *batch_data; int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); if (cmdbuf->num_cliprects < 0) return -EINVAL; DRM_UNLOCK(dev); batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK); ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz); if (ret != 0) { DRM_LOCK(dev); goto fail_batch_free; } if (cmdbuf->num_cliprects) { cliprects = malloc(cmdbuf->num_cliprects * sizeof(struct drm_clip_rect), DRM_MEM_DMA, M_WAITOK | M_ZERO); ret = -copyin(cmdbuf->cliprects, cliprects, cmdbuf->num_cliprects * sizeof(struct drm_clip_rect)); if (ret != 0) { DRM_LOCK(dev); goto fail_clip_free; } } DRM_LOCK(dev); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); if (ret) { DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); goto fail_clip_free; } sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv; if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); fail_clip_free: free(cliprects, DRM_MEM_DMA); fail_batch_free: free(batch_data, DRM_MEM_DMA); return ret; } +static int i915_emit_irq(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; +#if 0 + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; +#endif + + i915_kernel_lost_context(dev); + + DRM_DEBUG("i915: emit_irq\n"); + + dev_priv->counter++; + if (dev_priv->counter > 0x7FFFFFFFUL) + dev_priv->counter = 1; +#if 0 + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_enqueue = dev_priv->counter; +#else + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->last_enqueue = dev_priv->counter; +#endif + + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->counter); + OUT_RING(MI_USER_INTERRUPT); + ADVANCE_LP_RING(); + } + + return dev_priv->counter; +} + +static int i915_wait_irq(struct drm_device * dev, int irq_nr) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; +#if 0 + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; +#endif + int ret; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + + DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, + READ_BREADCRUMB(dev_priv)); + +#if 0 + if (READ_BREADCRUMB(dev_priv) >= irq_nr) { + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + return 0; + } + + if (master_priv->sarea_priv) + master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; +#else + if (READ_BREADCRUMB(dev_priv) >= irq_nr) { + if (dev_priv->sarea_priv) { + dev_priv->sarea_priv->last_dispatch = + READ_BREADCRUMB(dev_priv); + } + return 0; + } + + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; +#endif + + ret = 0; + mtx_lock(&dev_priv->irq_lock); + if (ring->irq_get(ring)) { + DRM_UNLOCK(dev); + while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) { + ret = -msleep(ring, &dev_priv->irq_lock, PCATCH, + "915wtq", 3 * hz); + } + ring->irq_put(ring); + mtx_unlock(&dev_priv->irq_lock); + DRM_LOCK(dev); + } else { + mtx_unlock(&dev_priv->irq_lock); 
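The interrupt-driven branch of i915_wait_irq above is the classic FreeBSD condition wait: sleep on a channel under the mutex that protects the condition, bounded by a timeout, and rely on the interrupt handler's wakeup() on the same channel. Stripped of the ring plumbing it looks roughly like this (all names are placeholders; the driver additionally negates the msleep return to get Linux-style negative errnos):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_lock;		/* assumed initialized with mtx_init() elsewhere */
static volatile u_int example_seq;	/* advanced by an interrupt handler, which wakeup()s */

static int
wait_for_seq(u_int wanted)
{
	int error = 0;

	mtx_lock(&example_lock);
	while (error == 0 && example_seq < wanted) {
		/* PCATCH allows signals to interrupt the wait; 3 * hz bounds each sleep. */
		error = msleep(__DEVOLATILE(void *, &example_seq),
		    &example_lock, PCATCH, "exwait", 3 * hz);
	}
	mtx_unlock(&example_lock);
	return (error);	/* 0 on success, EWOULDBLOCK on timeout, EINTR/ERESTART on signal */
}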
+ if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr, + 3000, 1, "915wir")) + ret = -EBUSY; + } + + if (ret == -EBUSY) { + DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", + READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); + } + + return ret; +} + +/* Needs the lock as it touches the ring. + */ +int i915_irq_emit(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_irq_emit_t *emit = data; + int result; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); + + DRM_LOCK(dev); + result = i915_emit_irq(dev); + DRM_UNLOCK(dev); + + if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { + DRM_ERROR("copy_to_user\n"); + return -EFAULT; + } + + return 0; +} + +/* Doesn't need the hardware lock. + */ +static int i915_irq_wait(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_irq_wait_t *irqwait = data; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + return i915_wait_irq(dev, irqwait->irq_seq); +} + +static int i915_vblank_pipe_get(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_vblank_pipe_t *pipe = data; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + + return 0; +} + +/** + * Schedule buffer swap at given vertical blank. + */ +static int i915_vblank_swap(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + /* The delayed swap mechanism was fundamentally racy, and has been + * removed. The model was that the client requested a delayed flip/swap + * from the kernel, then waited for vblank before continuing to perform + * rendering. The problem was that the kernel might wake the client + * up before it dispatched the vblank swap (since the lock has to be + * held while touching the ringbuffer), in which case the client would + * clear and start the next frame before the swap occurred, and + * flicker would occur in addition to likely missing the vblank. + * + * In the absence of this ioctl, userland falls back to a correct path + * of waiting for a vblank, then dispatching the swap on its own. + * Context switching to userland and back is plenty fast enough for + * meeting the requirements of vblank swapping. + */ + return -EINVAL; +} + static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + DRM_DEBUG("%s\n", __func__); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); ret = i915_dispatch_flip(dev); return ret; } int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_getparam_t *param = data; int value; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } switch (param->param) { case I915_PARAM_IRQ_ACTIVE: value = dev->irq_enabled ? 1 : 0; break; case I915_PARAM_ALLOW_BATCHBUFFER: - value = dev_priv->allow_batchbuffer ? 
1 : 0; + value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; break; case I915_PARAM_LAST_DISPATCH: value = READ_BREADCRUMB(dev_priv); break; case I915_PARAM_CHIPSET_ID: value = dev->pci_device; break; case I915_PARAM_HAS_GEM: value = 1; break; case I915_PARAM_NUM_FENCES_AVAIL: value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; break; case I915_PARAM_HAS_OVERLAY: value = dev_priv->overlay ? 1 : 0; break; case I915_PARAM_HAS_PAGEFLIPPING: value = 1; break; case I915_PARAM_HAS_EXECBUF2: value = 1; break; case I915_PARAM_HAS_BSD: - value = HAS_BSD(dev); + value = intel_ring_initialized(&dev_priv->rings[VCS]); break; case I915_PARAM_HAS_BLT: - value = HAS_BLT(dev); + value = intel_ring_initialized(&dev_priv->rings[BCS]); break; case I915_PARAM_HAS_RELAXED_FENCING: value = 1; break; case I915_PARAM_HAS_COHERENT_RINGS: value = 1; break; case I915_PARAM_HAS_EXEC_CONSTANTS: value = INTEL_INFO(dev)->gen >= 4; break; case I915_PARAM_HAS_RELAXED_DELTA: value = 1; break; case I915_PARAM_HAS_GEN7_SOL_RESET: value = 1; break; case I915_PARAM_HAS_LLC: value = HAS_LLC(dev); break; + case I915_PARAM_HAS_ALIASING_PPGTT: + value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); return -EINVAL; } if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); return -EFAULT; } return 0; } static int i915_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_setparam_t *param = data; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } switch (param->param) { case I915_SETPARAM_USE_MI_BATCHBUFFER_START: break; case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: - dev_priv->tex_lru_log_granularity = param->value; break; case I915_SETPARAM_ALLOW_BATCHBUFFER: - dev_priv->allow_batchbuffer = param->value; + dev_priv->dri1.allow_batchbuffer = param->value ? 
1 : 0; break; case I915_SETPARAM_NUM_USED_FENCES: if (param->value > dev_priv->num_fence_regs || param->value < 0) return -EINVAL; /* Userspace can use first N regs */ dev_priv->fence_reg_start = param->value; break; default: DRM_DEBUG("unknown parameter %d\n", param->param); return -EINVAL; } return 0; } static int i915_set_status_page(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_hws_addr_t *hws = data; struct intel_ring_buffer *ring = LP_RING(dev_priv); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + if (!I915_NEED_GFX_HWS(dev)) return -EINVAL; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); if (drm_core_check_feature(dev, DRIVER_MODESET)) { DRM_ERROR("tried to set status page when mode setting active\n"); return 0; } ring->status_page.gfx_addr = dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); - dev_priv->hws_map.offset = dev->agp->base + hws->addr; - dev_priv->hws_map.size = 4*1024; - dev_priv->hws_map.type = 0; - dev_priv->hws_map.flags = 0; - dev_priv->hws_map.mtrr = 0; - - drm_core_ioremap_wc(&dev_priv->hws_map, dev); - if (dev_priv->hws_map.virtual == NULL) { + dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr( + dev->agp->base + hws->addr, PAGE_SIZE, + VM_MEMATTR_WRITE_COMBINING); + if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { i915_dma_cleanup(dev); ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0; DRM_ERROR("can not ioremap virtual address for" " G33 hw status page\n"); return -ENOMEM; } - ring->status_page.page_addr = dev_priv->hw_status_page = - dev_priv->hws_map.virtual; - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n", dev_priv->status_gfx_addr); DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); return 0; } -static bool -intel_enable_ppgtt(struct drm_device *dev) -{ - if (i915_enable_ppgtt >= 0) - return i915_enable_ppgtt; - - /* Disable ppgtt on SNB if VT-d is on. */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled) - return false; - - return true; -} - static int -i915_load_gem_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long prealloc_size, gtt_size, mappable_size; - int ret; - - prealloc_size = dev_priv->mm.gtt.stolen_size; - gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT; - mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT; - - /* Basic memrange allocator for stolen space */ - drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); - - DRM_LOCK(dev); - if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { - /* PPGTT pdes are stolen from global gtt ptes, so shrink the - * aperture accordingly when using aliasing ppgtt. */ - gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; - /* For paranoia keep the guard page in between. */ - gtt_size -= PAGE_SIZE; - - i915_gem_do_init(dev, 0, mappable_size, gtt_size); - - ret = i915_gem_init_aliasing_ppgtt(dev); - if (ret) { - DRM_UNLOCK(dev); - return ret; - } - } else { - /* Let GEM Manage all of the aperture. - * - * However, leave one page at the end still bound to the scratch - * page. 
There are a number of places where the hardware - * apparently prefetches past the end of the object, and we've - * seen multiple hangs with the GPU head pointer stuck in a - * batchbuffer bound at the last page of the aperture. One page - * should be enough to keep any prefetching inside of the - * aperture. - */ - i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); - } - - ret = i915_gem_init_hw(dev); - DRM_UNLOCK(dev); - if (ret != 0) { - i915_gem_cleanup_aliasing_ppgtt(dev); - return (ret); - } - -#if 0 - /* Try to set up FBC with a reasonable compressed buffer size */ - if (I915_HAS_FBC(dev) && i915_powersave) { - int cfb_size; - - /* Leave 1M for line length buffer & misc. */ - - /* Try to get a 32M buffer... */ - if (prealloc_size > (36*1024*1024)) - cfb_size = 32*1024*1024; - else /* fall back to 7/8 of the stolen space */ - cfb_size = prealloc_size * 7 / 8; - i915_setup_compression(dev, cfb_size); - } -#endif - - /* Allow hardware batchbuffers unless told otherwise. */ - dev_priv->allow_batchbuffer = 1; - return 0; -} - -static int i915_load_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; ret = intel_parse_bios(dev); if (ret) DRM_INFO("failed to find VBIOS tables\n"); #if 0 intel_register_dsm_handler(); #endif - /* IIR "flip pending" bit means done if this bit is set */ - if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) - dev_priv->flip_pending_is_done = true; + /* Initialise stolen first so that we may reserve preallocated + * objects for the BIOS to KMS transition. + */ + ret = i915_gem_init_stolen(dev); + if (ret) + goto cleanup_vga_switcheroo; intel_modeset_init(dev); - ret = i915_load_gem_init(dev); + ret = i915_gem_init(dev); if (ret != 0) - goto cleanup_gem; + goto cleanup_gem_stolen; intel_modeset_gem_init(dev); ret = drm_irq_install(dev); if (ret) goto cleanup_gem; dev->vblank_disable_allowed = 1; ret = intel_fbdev_init(dev); if (ret) goto cleanup_gem; drm_kms_helper_poll_init(dev); /* We're off and running w/KMS */ dev_priv->mm.suspended = 0; return (0); cleanup_gem: DRM_LOCK(dev); i915_gem_cleanup_ringbuffer(dev); DRM_UNLOCK(dev); i915_gem_cleanup_aliasing_ppgtt(dev); +cleanup_gem_stolen: + i915_gem_cleanup_stolen(dev); +cleanup_vga_switcheroo: return (ret); } static int i915_get_bridge_dev(struct drm_device *dev) { struct drm_i915_private *dev_priv; dev_priv = dev->dev_private; dev_priv->bridge_dev = intel_gtt_get_bridge_device(); if (dev_priv->bridge_dev == NULL) { DRM_ERROR("bridge device not found\n"); return (-1); } return (0); } #define MCHBAR_I915 0x44 #define MCHBAR_I965 0x48 #define MCHBAR_SIZE (4*4096) #define DEVEN_REG 0x54 #define DEVEN_MCHBAR_EN (1 << 28) /* Allocate space for the MCH regs if needed, return nonzero on error */ static int intel_alloc_mchbar_resource(struct drm_device *dev) { drm_i915_private_t *dev_priv; device_t vga; int reg; u32 temp_lo, temp_hi; u64 mchbar_addr, temp; dev_priv = dev->dev_private; reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; if (INTEL_INFO(dev)->gen >= 4) temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4); else temp_hi = 0; temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4); mchbar_addr = ((u64)temp_hi << 32) | temp_lo; /* If ACPI doesn't have it, assume we need to allocate it ourselves */ #ifdef XXX_CONFIG_PNP if (mchbar_addr && pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) return 0; #endif /* Get some space for it */ vga = device_get_parent(dev->device); dev_priv->mch_res_rid = 0x100; dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga), dev->device, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL, MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE); if (dev_priv->mch_res == NULL) { DRM_ERROR("failed mchbar resource alloc\n"); return (-ENOMEM); } if (INTEL_INFO(dev)->gen >= 4) { temp = rman_get_start(dev_priv->mch_res); temp >>= 32; pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4); } pci_write_config(dev_priv->bridge_dev, reg, rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4); return (0); } static void intel_setup_mchbar(struct drm_device *dev) { drm_i915_private_t *dev_priv; int mchbar_reg; u32 temp; bool enabled; dev_priv = dev->dev_private; mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; dev_priv->mchbar_need_disable = false; if (IS_I915G(dev) || IS_I915GM(dev)) { temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4); enabled = (temp & DEVEN_MCHBAR_EN) != 0; } else { temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4); enabled = temp & 1; } /* If it's already enabled, don't have to do anything */ if (enabled) { DRM_DEBUG("mchbar already enabled\n"); return; } if (intel_alloc_mchbar_resource(dev)) return; dev_priv->mchbar_need_disable = true; /* Space is allocated or reserved, so enable it. */ if (IS_I915G(dev) || IS_I915GM(dev)) { pci_write_config(dev_priv->bridge_dev, DEVEN_REG, temp | DEVEN_MCHBAR_EN, 4); } else { temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4); pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4); } } static void intel_teardown_mchbar(struct drm_device *dev) { drm_i915_private_t *dev_priv; device_t vga; int mchbar_reg; u32 temp; dev_priv = dev->dev_private; mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; if (dev_priv->mchbar_need_disable) { if (IS_I915G(dev) || IS_I915GM(dev)) { temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4); temp &= ~DEVEN_MCHBAR_EN; pci_write_config(dev_priv->bridge_dev, DEVEN_REG, temp, 4); } else { temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4); temp &= ~1; pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp, 4); } } if (dev_priv->mch_res != NULL) { vga = device_get_parent(dev->device); BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->device, SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res); BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->device, SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res); dev_priv->mch_res = NULL; } } int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv = dev->dev_private; + const struct intel_device_info *info; unsigned long base, size; int mmio_bar, ret; + info = i915_get_device_id(dev->pci_device); + + /* Refuse to load on gen6+ without kms enabled. 
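intel_alloc_mchbar_resource above reads MCHBAR as two 32-bit PCI config dwords (the high half only exists on gen4+) and joins them into one 64-bit base; bit 0 of the low dword is the enable bit that intel_setup_mchbar tests on non-915G parts. A tiny illustration with made-up register contents:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t temp_lo = 0xfed10001;	/* hypothetical low dword; bit 0 = enabled */
	uint32_t temp_hi = 0x00000001;	/* hypothetical high dword (gen4+ only) */
	uint64_t mchbar_addr = ((uint64_t)temp_hi << 32) | temp_lo;

	printf("MCHBAR = 0x%016jx, enabled = %d\n",
	    (uintmax_t)mchbar_addr, (int)(temp_lo & 1));
	return (0);
}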
*/ + if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + ret = 0; /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; dev->types[7] = _DRM_STAT_PRIMARY; dev->types[8] = _DRM_STAT_SECONDARY; dev->types[9] = _DRM_STAT_DMA; dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); - if (dev_priv == NULL) - return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->dev = dev; - dev_priv->info = i915_get_device_id(dev->pci_device); + dev_priv->info = info; if (i915_get_bridge_dev(dev)) { free(dev_priv, DRM_MEM_DRIVER); return (-EIO); } dev_priv->mm.gtt = intel_gtt_get(); /* Add register map (needed for suspend/resume) */ mmio_bar = IS_GEN2(dev) ? 1 : 0; base = drm_get_resource_start(dev, mmio_bar); size = drm_get_resource_len(dev, mmio_bar); ret = drm_addmap(dev, base, size, _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map); dev_priv->tq = taskqueue_create("915", M_WAITOK, taskqueue_thread_enqueue, &dev_priv->tq); taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq"); mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF); mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF); mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF); mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF); + mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF); - dev_priv->has_gem = 1; intel_irq_init(dev); intel_setup_mchbar(dev); intel_setup_gmbus(dev); intel_opregion_setup(dev); intel_setup_bios(dev); i915_gem_load(dev); /* Init HWS */ if (!I915_NEED_GFX_HWS(dev)) { ret = i915_init_phys_hws(dev); if (ret != 0) { drm_rmmap(dev, dev_priv->mmio_map); drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER); return ret; } } - if (IS_PINEVIEW(dev)) - i915_pineview_get_mem_freq(dev); - else if (IS_GEN5(dev)) - i915_ironlake_get_mem_freq(dev); - mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF); - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) dev_priv->num_pipe = 3; else if (IS_MOBILE(dev) || !IS_GEN2(dev)) dev_priv->num_pipe = 2; else dev_priv->num_pipe = 1; ret = drm_vblank_init(dev, dev_priv->num_pipe); if (ret) goto out_gem_unload; /* Start out suspended */ dev_priv->mm.suspended = 1; intel_detect_pch(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { DRM_UNLOCK(dev); ret = i915_load_modeset_init(dev); DRM_LOCK(dev); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); goto out_gem_unload; } } intel_opregion_init(dev); callout_init(&dev_priv->hangcheck_timer, 1); callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD, i915_hangcheck_elapsed, dev); - if (IS_GEN5(dev)) { - mtx_lock(&mchdev_lock); - i915_mch_dev = dev_priv; - dev_priv->mchdev_lock = &mchdev_lock; - mtx_unlock(&mchdev_lock); - } + if (IS_GEN5(dev)) + intel_gpu_ips_init(dev_priv); return (0); out_gem_unload: /* XXXKIB */ (void) i915_driver_unload_int(dev, true); return (ret); } static int i915_driver_unload_int(struct drm_device *dev, bool locked) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (!locked) DRM_LOCK(dev); - ret = i915_gpu_idle(dev, true); + ret = i915_gpu_idle(dev); if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); + i915_gem_retire_requests(dev); if (!locked) DRM_UNLOCK(dev); i915_free_hws(dev); intel_teardown_mchbar(dev); if (locked) DRM_UNLOCK(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fbdev_fini(dev); intel_modeset_cleanup(dev); } /* Free error state after interrupts are fully disabled. 
*/ callout_stop(&dev_priv->hangcheck_timer); callout_drain(&dev_priv->hangcheck_timer); i915_destroy_error_state(dev); intel_opregion_fini(dev); if (locked) DRM_LOCK(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { if (!locked) DRM_LOCK(dev); i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); if (!locked) DRM_UNLOCK(dev); i915_gem_cleanup_aliasing_ppgtt(dev); #if 1 KIB_NOTYET(); #else if (I915_HAS_FBC(dev) && i915_powersave) i915_cleanup_compression(dev); #endif drm_mm_takedown(&dev_priv->mm.stolen); intel_cleanup_overlay(dev); if (!I915_NEED_GFX_HWS(dev)) i915_free_hws(dev); } i915_gem_unload(dev); mtx_destroy(&dev_priv->irq_lock); if (dev_priv->tq != NULL) taskqueue_free(dev_priv->tq); bus_generic_detach(dev->device); drm_rmmap(dev, dev_priv->mmio_map); intel_teardown_gmbus(dev); + mtx_destroy(&dev_priv->dpio_lock); mtx_destroy(&dev_priv->error_lock); mtx_destroy(&dev_priv->error_completion_lock); mtx_destroy(&dev_priv->rps_lock); drm_free(dev->dev_private, sizeof(drm_i915_private_t), DRM_MEM_DRIVER); return (0); } int i915_driver_unload(struct drm_device *dev) { return (i915_driver_unload_int(dev, true)); } int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) { struct drm_i915_file_private *i915_file_priv; i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES, M_WAITOK | M_ZERO); mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF); INIT_LIST_HEAD(&i915_file_priv->mm.request_list); file_priv->driver_priv = i915_file_priv; drm_gem_names_init(&i915_file_priv->context_idr); return (0); } void i915_driver_lastclose(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { #if 1 KIB_NOTYET(); #else drm_fb_helper_restore(); vga_switcheroo_process_delayed_switch(); #endif return; } i915_gem_lastclose(dev); i915_dma_cleanup(dev); } void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { i915_gem_context_close(dev, file_priv); i915_gem_release(dev, file_priv); } void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) { struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; mtx_destroy(&i915_file_priv->mm.lck); drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); } struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), - DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), + DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, 
DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), }; #ifdef COMPAT_FREEBSD32 extern drm_ioctl_desc_t i915_compat_ioctls[]; extern int i915_compat_ioctls_nr; #endif struct drm_driver_info i915_driver_info = { .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ | DRIVER_GEM /*| DRIVER_MODESET*/, .buf_priv_size = sizeof(drm_i915_private_t), .load = i915_driver_load, .open = i915_driver_open, .unload = i915_driver_unload, .preclose = i915_driver_preclose, .lastclose = i915_driver_lastclose, .postclose = i915_driver_postclose, .device_is_agp = i915_driver_device_is_agp, .gem_init_object = i915_gem_init_object, .gem_free_object = i915_gem_free_object, .gem_pager_ops = &i915_gem_pager_ops, .dumb_create = i915_gem_dumb_create, .dumb_map_offset = i915_gem_mmap_gtt, .dumb_destroy = i915_gem_dumb_destroy, .sysctl_init = i915_sysctl_init, .sysctl_cleanup = i915_sysctl_cleanup, .ioctls = i915_ioctls, #ifdef COMPAT_FREEBSD32 .compat_ioctls = i915_compat_ioctls, .compat_ioctls_nr = &i915_compat_ioctls_nr, #endif .max_ioctl = DRM_ARRAY_SIZE(i915_ioctls), .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor 
= DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; -/** - * Determine if the device really is AGP or not. - * - * All Intel graphics chipsets are treated as AGP, even if they are really - * built-in. - * - * \param dev The device to be tested. - * - * \returns - * A value of 1 is always retured to indictate every i9x5 is AGP. +/* + * This is really ugly: Because old userspace abused the linux agp interface to + * manage the gtt, we need to claim that all intel devices are agp. For + * otherwise the drm core refuses to initialize the agp support code. */ int i915_driver_device_is_agp(struct drm_device * dev) { return 1; } -static void i915_pineview_get_mem_freq(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - u32 tmp; - - tmp = I915_READ(CLKCFG); - - switch (tmp & CLKCFG_FSB_MASK) { - case CLKCFG_FSB_533: - dev_priv->fsb_freq = 533; /* 133*4 */ - break; - case CLKCFG_FSB_800: - dev_priv->fsb_freq = 800; /* 200*4 */ - break; - case CLKCFG_FSB_667: - dev_priv->fsb_freq = 667; /* 167*4 */ - break; - case CLKCFG_FSB_400: - dev_priv->fsb_freq = 400; /* 100*4 */ - break; - } - - switch (tmp & CLKCFG_MEM_MASK) { - case CLKCFG_MEM_533: - dev_priv->mem_freq = 533; - break; - case CLKCFG_MEM_667: - dev_priv->mem_freq = 667; - break; - case CLKCFG_MEM_800: - dev_priv->mem_freq = 800; - break; - } - - /* detect pineview DDR3 setting */ - tmp = I915_READ(CSHRDDR3CTL); - dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; -} - -static void i915_ironlake_get_mem_freq(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - u16 ddrpll, csipll; - - ddrpll = I915_READ16(DDRMPLL1); - csipll = I915_READ16(CSIPLL0); - - switch (ddrpll & 0xff) { - case 0xc: - dev_priv->mem_freq = 800; - break; - case 0x10: - dev_priv->mem_freq = 1066; - break; - case 0x14: - dev_priv->mem_freq = 1333; - break; - case 0x18: - dev_priv->mem_freq = 1600; - break; - default: - DRM_DEBUG("unknown memory frequency 0x%02x\n", - ddrpll & 0xff); - dev_priv->mem_freq = 0; - break; - } - - dev_priv->r_t = dev_priv->mem_freq; - - switch (csipll & 0x3ff) { - case 0x00c: - dev_priv->fsb_freq = 3200; - break; - case 0x00e: - dev_priv->fsb_freq = 3733; - break; - case 0x010: - dev_priv->fsb_freq = 4266; - break; - case 0x012: - dev_priv->fsb_freq = 4800; - break; - case 0x014: - dev_priv->fsb_freq = 5333; - break; - case 0x016: - dev_priv->fsb_freq = 5866; - break; - case 0x018: - dev_priv->fsb_freq = 6400; - break; - default: - DRM_DEBUG("unknown fsb frequency 0x%04x\n", - csipll & 0x3ff); - dev_priv->fsb_freq = 0; - break; - } - - if (dev_priv->fsb_freq == 3200) { - dev_priv->c_m = 0; - } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { - dev_priv->c_m = 1; - } else { - dev_priv->c_m = 2; - } -} - -static const struct cparams { - u16 i; - u16 t; - u16 m; - u16 c; -} cparams[] = { - { 1, 1333, 301, 28664 }, - { 1, 1066, 294, 24460 }, - { 1, 800, 294, 25192 }, - { 0, 1333, 276, 27605 }, - { 0, 1066, 276, 27605 }, - { 0, 800, 231, 23784 }, -}; - -unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) -{ - u64 total_count, diff, ret; - u32 count1, count2, count3, m = 0, c = 0; - unsigned long now = jiffies_to_msecs(jiffies), diff1; - int i; - - diff1 = now - dev_priv->last_time1; - /* - * sysctl(8) reads the value of sysctl twice in rapid - * succession. There is high chance that it happens in the - * same timer tick. Use the cached value to not divide by - * zero and give the hw a chance to gather more samples. 
- */ - if (diff1 <= 10) - return (dev_priv->chipset_power); - - count1 = I915_READ(DMIEC); - count2 = I915_READ(DDREC); - count3 = I915_READ(CSIEC); - - total_count = count1 + count2 + count3; - - /* FIXME: handle per-counter overflow */ - if (total_count < dev_priv->last_count1) { - diff = ~0UL - dev_priv->last_count1; - diff += total_count; - } else { - diff = total_count - dev_priv->last_count1; - } - - for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) { - if (cparams[i].i == dev_priv->c_m && - cparams[i].t == dev_priv->r_t) { - m = cparams[i].m; - c = cparams[i].c; - break; - } - } - - diff = diff / diff1; - ret = ((m * diff) + c); - ret = ret / 10; - - dev_priv->last_count1 = total_count; - dev_priv->last_time1 = now; - - dev_priv->chipset_power = ret; - return (ret); -} - -unsigned long i915_mch_val(struct drm_i915_private *dev_priv) -{ - unsigned long m, x, b; - u32 tsfs; - - tsfs = I915_READ(TSFS); - - m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); - x = I915_READ8(I915_TR1); - - b = tsfs & TSFS_INTR_MASK; - - return ((m * x) / 127) - b; -} - -static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) -{ - static const struct v_table { - u16 vd; /* in .1 mil */ - u16 vm; /* in .1 mil */ - } v_table[] = { - { 0, 0, }, - { 375, 0, }, - { 500, 0, }, - { 625, 0, }, - { 750, 0, }, - { 875, 0, }, - { 1000, 0, }, - { 1125, 0, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4125, 3000, }, - { 4250, 3125, }, - { 4375, 3250, }, - { 4500, 3375, }, - { 4625, 3500, }, - { 4750, 3625, }, - { 4875, 3750, }, - { 5000, 3875, }, - { 5125, 4000, }, - { 5250, 4125, }, - { 5375, 4250, }, - { 5500, 4375, }, - { 5625, 4500, }, - { 5750, 4625, }, - { 5875, 4750, }, - { 6000, 4875, }, - { 6125, 5000, }, - { 6250, 5125, }, - { 6375, 5250, }, - { 6500, 5375, }, - { 6625, 5500, }, - { 6750, 5625, }, - { 6875, 5750, }, - { 7000, 5875, }, - { 7125, 6000, }, - { 7250, 6125, }, - { 7375, 6250, }, - { 7500, 6375, }, - { 7625, 6500, }, - { 7750, 6625, }, - { 7875, 6750, }, - { 8000, 6875, }, - { 8125, 7000, }, - { 8250, 7125, }, - { 8375, 7250, }, - { 8500, 7375, }, - { 8625, 7500, }, - { 8750, 7625, }, - { 8875, 7750, }, - { 9000, 7875, }, - { 9125, 8000, }, - { 9250, 8125, }, - { 9375, 8250, }, - { 9500, 8375, }, - { 9625, 8500, }, - { 9750, 8625, }, - { 9875, 8750, }, - { 10000, 8875, }, - { 10125, 9000, }, - { 10250, 9125, }, - { 10375, 9250, }, - { 10500, 9375, }, - { 10625, 9500, }, - { 10750, 9625, }, - { 10875, 9750, }, - { 11000, 9875, }, - { 11125, 10000, }, - { 11250, 10125, }, - { 11375, 10250, }, - { 11500, 10375, }, - { 11625, 10500, }, - { 11750, 10625, }, - { 11875, 10750, }, - { 12000, 10875, }, - { 12125, 11000, }, - { 12250, 11125, }, - { 12375, 11250, }, - { 12500, 11375, }, - { 12625, 11500, }, - { 12750, 11625, }, - { 12875, 11750, }, - { 13000, 11875, }, - { 13125, 12000, }, - { 13250, 12125, }, - { 13375, 12250, }, - { 13500, 12375, }, - { 13625, 12500, }, - { 13750, 12625, }, - { 13875, 12750, }, - { 14000, 12875, }, - { 14125, 13000, }, - { 14250, 13125, }, - { 14375, 13250, }, - { 14500, 13375, }, - { 14625, 13500, }, - { 14750, 13625, }, - { 14875, 13750, }, - { 15000, 13875, }, - { 
15125, 14000, }, - { 15250, 14125, }, - { 15375, 14250, }, - { 15500, 14375, }, - { 15625, 14500, }, - { 15750, 14625, }, - { 15875, 14750, }, - { 16000, 14875, }, - { 16125, 15000, }, - }; - if (dev_priv->info->is_mobile) - return v_table[pxvid].vm; - else - return v_table[pxvid].vd; -} - -void i915_update_gfx_val(struct drm_i915_private *dev_priv) -{ - struct timespec now, diff1; - u64 diff; - unsigned long diffms; - u32 count; - - if (dev_priv->info->gen != 5) - return; - - nanotime(&now); - diff1 = now; - timespecsub(&diff1, &dev_priv->last_time2); - - /* Don't divide by 0 */ - diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; - if (!diffms) - return; - - count = I915_READ(GFXEC); - - if (count < dev_priv->last_count2) { - diff = ~0UL - dev_priv->last_count2; - diff += count; - } else { - diff = count - dev_priv->last_count2; - } - - dev_priv->last_count2 = count; - dev_priv->last_time2 = now; - - /* More magic constants... */ - diff = diff * 1181; - diff = diff / (diffms * 10); - dev_priv->gfx_power = diff; -} - -unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) -{ - unsigned long t, corr, state1, corr2, state2; - u32 pxvid, ext_v; - - pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); - pxvid = (pxvid >> 24) & 0x7f; - ext_v = pvid_to_extvid(dev_priv, pxvid); - - state1 = ext_v; - - t = i915_mch_val(dev_priv); - - /* Revel in the empirically derived constants */ - - /* Correction factor in 1/100000 units */ - if (t > 80) - corr = ((t * 2349) + 135940); - else if (t >= 50) - corr = ((t * 964) + 29317); - else /* < 50 */ - corr = ((t * 301) + 1004); - - corr = corr * ((150142 * state1) / 10000 - 78642); - corr /= 100000; - corr2 = (corr * dev_priv->corr); - - state2 = (corr2 * state1) / 10000; - state2 /= 100; /* convert to mW */ - - i915_update_gfx_val(dev_priv); - - return dev_priv->gfx_power + state2; -} - -/** - * i915_read_mch_val - return value for IPS use - * - * Calculate and return a value for the IPS driver to use when deciding whether - * we have thermal and power headroom to increase CPU or GPU power budget. - */ -unsigned long i915_read_mch_val(void) -{ - struct drm_i915_private *dev_priv; - unsigned long chipset_val, graphics_val, ret = 0; - - mtx_lock(&mchdev_lock); - if (!i915_mch_dev) - goto out_unlock; - dev_priv = i915_mch_dev; - - chipset_val = i915_chipset_val(dev_priv); - graphics_val = i915_gfx_val(dev_priv); - - ret = chipset_val + graphics_val; - -out_unlock: - mtx_unlock(&mchdev_lock); - - return ret; -} - -/** - * i915_gpu_raise - raise GPU frequency limit - * - * Raise the limit; IPS indicates we have thermal headroom. - */ -bool i915_gpu_raise(void) -{ - struct drm_i915_private *dev_priv; - bool ret = true; - - mtx_lock(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; - - if (dev_priv->max_delay > dev_priv->fmax) - dev_priv->max_delay--; - -out_unlock: - mtx_unlock(&mchdev_lock); - - return ret; -} - -/** - * i915_gpu_lower - lower GPU frequency limit - * - * IPS indicates we're close to a thermal limit, so throttle back the GPU - * frequency maximum. 
- */ -bool i915_gpu_lower(void) -{ - struct drm_i915_private *dev_priv; - bool ret = true; - - mtx_lock(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; - - if (dev_priv->max_delay < dev_priv->min_delay) - dev_priv->max_delay++; - -out_unlock: - mtx_unlock(&mchdev_lock); - - return ret; -} - -/** - * i915_gpu_busy - indicate GPU business to IPS - * - * Tell the IPS driver whether or not the GPU is busy. - */ -bool i915_gpu_busy(void) -{ - struct drm_i915_private *dev_priv; - bool ret = false; - - mtx_lock(&mchdev_lock); - if (!i915_mch_dev) - goto out_unlock; - dev_priv = i915_mch_dev; - - ret = dev_priv->busy; - -out_unlock: - mtx_unlock(&mchdev_lock); - - return ret; -} - -/** - * i915_gpu_turbo_disable - disable graphics turbo - * - * Disable graphics turbo by resetting the max frequency and setting the - * current frequency to the default. - */ -bool i915_gpu_turbo_disable(void) -{ - struct drm_i915_private *dev_priv; - bool ret = true; - - mtx_lock(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; - - dev_priv->max_delay = dev_priv->fstart; - - if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) - ret = false; - -out_unlock: - mtx_unlock(&mchdev_lock); - - return ret; -} Index: head/sys/dev/drm2/i915/i915_drm.h =================================================================== --- head/sys/dev/drm2/i915/i915_drm.h (revision 277486) +++ head/sys/dev/drm2/i915/i915_drm.h (revision 277487) @@ -1,992 +1,993 @@ /*- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include __FBSDID("$FreeBSD$"); #ifndef _I915_DRM_H_ #define _I915_DRM_H_ /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. */ #include /* Each region is a minimum of 16k, and there are at most 255 of them. */ #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use * of chars for next/prev indices */ #define I915_LOG_MIN_TEX_REGION_SIZE 14 typedef struct _drm_i915_init { enum { I915_INIT_DMA = 0x01, I915_CLEANUP_DMA = 0x02, I915_RESUME_DMA = 0x03, /* Since this struct isn't versioned, just used a new * 'func' code to indicate the presence of dri2 sarea * info. 
*/ I915_INIT_DMA2 = 0x04 } func; unsigned int mmio_offset; int sarea_priv_offset; unsigned int ring_start; unsigned int ring_end; unsigned int ring_size; unsigned int front_offset; unsigned int back_offset; unsigned int depth_offset; unsigned int w; unsigned int h; unsigned int pitch; unsigned int pitch_bits; unsigned int back_pitch; unsigned int depth_pitch; unsigned int cpp; unsigned int chipset; unsigned int sarea_handle; } drm_i915_init_t; typedef struct drm_i915_sarea { struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; int last_upload; /* last time texture was uploaded */ int last_enqueue; /* last time a buffer was enqueued */ int last_dispatch; /* age of the most recently dispatched buffer */ int ctxOwner; /* last context to upload state */ int texAge; int pf_enabled; /* is pageflipping allowed? */ int pf_active; int pf_current_page; /* which buffer is being displayed? */ int perf_boxes; /* performance boxes to be displayed */ int width, height; /* screen size in pixels */ drm_handle_t front_handle; int front_offset; int front_size; drm_handle_t back_handle; int back_offset; int back_size; drm_handle_t depth_handle; int depth_offset; int depth_size; drm_handle_t tex_handle; int tex_offset; int tex_size; int log_tex_granularity; int pitch; int rotation; /* 0, 90, 180 or 270 */ int rotated_offset; int rotated_size; int rotated_pitch; int virtualX, virtualY; unsigned int front_tiled; unsigned int back_tiled; unsigned int depth_tiled; unsigned int rotated_tiled; unsigned int rotated2_tiled; int planeA_x; int planeA_y; int planeA_w; int planeA_h; int planeB_x; int planeB_y; int planeB_w; int planeB_h; /* Triple buffering */ drm_handle_t third_handle; int third_offset; int third_size; unsigned int third_tiled; /* buffer object handles for the static buffers. May change * over the lifetime of the client, though it doesn't in our current * implementation. */ unsigned int front_bo_handle; unsigned int back_bo_handle; unsigned int third_bo_handle; unsigned int depth_bo_handle; } drm_i915_sarea_t; /* Driver specific fence types and classes. */ /* The only fence class we support */ #define DRM_I915_FENCE_CLASS_ACCEL 0 /* Fence type that guarantees read-write flush */ #define DRM_I915_FENCE_TYPE_RW 2 /* MI_FLUSH programmed just before the fence */ #define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000 /* Flags for perf_boxes */ #define I915_BOX_RING_EMPTY 0x1 #define I915_BOX_FLIP 0x2 #define I915_BOX_WAIT 0x4 #define I915_BOX_TEXTURE_LOAD 0x8 #define I915_BOX_LOST_CONTEXT 0x10 /* I915 specific ioctls * The device specific ioctl range is 0x40 to 0x79. 
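 * The codes below are offsets into that range: the full ioctl number is
 * formed by adding DRM_COMMAND_BASE (0x40), so DRM_I915_GETPARAM (0x06),
 * for example, ends up as ioctl nr 0x46. libdrm's drmCommand wrappers
 * take the offset form directly; a minimal sketch of a parameter query
 * (error handling omitted) is:
 *
 *     int chipset_id;
 *     drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *                                .value = &chipset_id };
 *     drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));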
*/ #define DRM_I915_INIT 0x00 #define DRM_I915_FLUSH 0x01 #define DRM_I915_FLIP 0x02 #define DRM_I915_BATCHBUFFER 0x03 #define DRM_I915_IRQ_EMIT 0x04 #define DRM_I915_IRQ_WAIT 0x05 #define DRM_I915_GETPARAM 0x06 #define DRM_I915_SETPARAM 0x07 #define DRM_I915_ALLOC 0x08 #define DRM_I915_FREE 0x09 #define DRM_I915_INIT_HEAP 0x0a #define DRM_I915_CMDBUFFER 0x0b #define DRM_I915_DESTROY_HEAP 0x0c #define DRM_I915_SET_VBLANK_PIPE 0x0d #define DRM_I915_GET_VBLANK_PIPE 0x0e #define DRM_I915_VBLANK_SWAP 0x0f #define DRM_I915_MMIO 0x10 #define DRM_I915_HWS_ADDR 0x11 #define DRM_I915_EXECBUFFER 0x12 #define DRM_I915_GEM_INIT 0x13 #define DRM_I915_GEM_EXECBUFFER 0x14 #define DRM_I915_GEM_PIN 0x15 #define DRM_I915_GEM_UNPIN 0x16 #define DRM_I915_GEM_BUSY 0x17 #define DRM_I915_GEM_THROTTLE 0x18 #define DRM_I915_GEM_ENTERVT 0x19 #define DRM_I915_GEM_LEAVEVT 0x1a #define DRM_I915_GEM_CREATE 0x1b #define DRM_I915_GEM_PREAD 0x1c #define DRM_I915_GEM_PWRITE 0x1d #define DRM_I915_GEM_MMAP 0x1e #define DRM_I915_GEM_SET_DOMAIN 0x1f #define DRM_I915_GEM_SW_FINISH 0x20 #define DRM_I915_GEM_SET_TILING 0x21 #define DRM_I915_GEM_GET_TILING 0x22 #define DRM_I915_GEM_GET_APERTURE 0x23 #define DRM_I915_GEM_MMAP_GTT 0x24 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 #define DRM_I915_GEM_MADVISE 0x26 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 #define DRM_I915_OVERLAY_ATTRS 0x28 #define DRM_I915_GEM_EXECBUFFER2 0x29 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a #define DRM_I915_SET_SPRITE_COLORKEY 0x2b #define DRM_I915_GEM_CONTEXT_CREATE 0x2d #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) #define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t) #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) #define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio) #define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer) #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + 
DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) /* Asynchronous page flipping: */ typedef struct drm_i915_flip { /* * This is really talking about planes, and we could rename it * except for the fact that some of the duplicated i915_drm.h files * out there check for HAVE_I915_FLIP and so might pick up this * version. */ int pipes; } drm_i915_flip_t; /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. 
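 * A minimal sketch of this legacy (pre-GEM) submission path, assuming
 * the batch has already been written at a caller-known offset in the
 * AGP aperture (names illustrative, error handling omitted):
 *
 *     drm_i915_batchbuffer_t bb = {
 *             .start = batch_agp_offset,
 *             .used  = batch_len,
 *             .num_cliprects = 0,
 *     };
 *     drmCommandWrite(fd, DRM_I915_BATCHBUFFER, &bb, sizeof(bb));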
*/ typedef struct drm_i915_batchbuffer { int start; /* agp offset */ int used; /* nr bytes in use */ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ int num_cliprects; /* mulitpass with multiple cliprects? */ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ } drm_i915_batchbuffer_t; /* As above, but pass a pointer to userspace buffer which can be * validated by the kernel prior to sending to hardware. */ typedef struct _drm_i915_cmdbuffer { char __user *buf; /* pointer to userspace command buffer */ int sz; /* nr bytes in buf */ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ int num_cliprects; /* mulitpass with multiple cliprects? */ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ } drm_i915_cmdbuffer_t; /* Userspace can request & wait on irq's: */ typedef struct drm_i915_irq_emit { int __user *irq_seq; } drm_i915_irq_emit_t; typedef struct drm_i915_irq_wait { int irq_seq; } drm_i915_irq_wait_t; /* Ioctl to query kernel params: */ -#define I915_PARAM_IRQ_ACTIVE 1 -#define I915_PARAM_ALLOW_BATCHBUFFER 2 -#define I915_PARAM_LAST_DISPATCH 3 -#define I915_PARAM_CHIPSET_ID 4 -#define I915_PARAM_HAS_GEM 5 -#define I915_PARAM_NUM_FENCES_AVAIL 6 -#define I915_PARAM_HAS_OVERLAY 7 +#define I915_PARAM_IRQ_ACTIVE 1 +#define I915_PARAM_ALLOW_BATCHBUFFER 2 +#define I915_PARAM_LAST_DISPATCH 3 +#define I915_PARAM_CHIPSET_ID 4 +#define I915_PARAM_HAS_GEM 5 +#define I915_PARAM_NUM_FENCES_AVAIL 6 +#define I915_PARAM_HAS_OVERLAY 7 #define I915_PARAM_HAS_PAGEFLIPPING 8 -#define I915_PARAM_HAS_EXECBUF2 9 +#define I915_PARAM_HAS_EXECBUF2 9 #define I915_PARAM_HAS_BSD 10 #define I915_PARAM_HAS_BLT 11 #define I915_PARAM_HAS_RELAXED_FENCING 12 #define I915_PARAM_HAS_COHERENT_RINGS 13 #define I915_PARAM_HAS_EXEC_CONSTANTS 14 #define I915_PARAM_HAS_RELAXED_DELTA 15 #define I915_PARAM_HAS_GEN7_SOL_RESET 16 -#define I915_PARAM_HAS_LLC 17 +#define I915_PARAM_HAS_LLC 17 +#define I915_PARAM_HAS_ALIASING_PPGTT 18 typedef struct drm_i915_getparam { int param; int __user *value; } drm_i915_getparam_t; /* Ioctl to set kernel params: */ #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 #define I915_SETPARAM_NUM_USED_FENCES 4 typedef struct drm_i915_setparam { int param; int value; } drm_i915_setparam_t; /* A memory manager for regions of shared memory: */ #define I915_MEM_REGION_AGP 1 typedef struct drm_i915_mem_alloc { int region; int alignment; int size; int __user *region_offset; /* offset from start of fb or agp */ } drm_i915_mem_alloc_t; typedef struct drm_i915_mem_free { int region; int region_offset; } drm_i915_mem_free_t; typedef struct drm_i915_mem_init_heap { int region; int size; int start; } drm_i915_mem_init_heap_t; /* Allow memory manager to be torn down and re-initialized (eg on * rotate): */ typedef struct drm_i915_mem_destroy_heap { int region; } drm_i915_mem_destroy_heap_t; /* Allow X server to configure which pipes to monitor for vblank signals */ #define DRM_I915_VBLANK_PIPE_A 1 #define DRM_I915_VBLANK_PIPE_B 2 typedef struct drm_i915_vblank_pipe { int pipe; } drm_i915_vblank_pipe_t; /* Schedule buffer swap at given vertical blank: */ typedef struct drm_i915_vblank_swap { drm_drawable_t drawable; enum drm_vblank_seq_type seqtype; unsigned int sequence; } drm_i915_vblank_swap_t; #define I915_MMIO_READ 0 #define I915_MMIO_WRITE 1 #define I915_MMIO_MAY_READ 
0x1 #define I915_MMIO_MAY_WRITE 0x2 #define MMIO_REGS_IA_PRIMATIVES_COUNT 0 #define MMIO_REGS_IA_VERTICES_COUNT 1 #define MMIO_REGS_VS_INVOCATION_COUNT 2 #define MMIO_REGS_GS_PRIMITIVES_COUNT 3 #define MMIO_REGS_GS_INVOCATION_COUNT 4 #define MMIO_REGS_CL_PRIMITIVES_COUNT 5 #define MMIO_REGS_CL_INVOCATION_COUNT 6 #define MMIO_REGS_PS_INVOCATION_COUNT 7 #define MMIO_REGS_PS_DEPTH_COUNT 8 typedef struct drm_i915_mmio_entry { unsigned int flag; unsigned int offset; unsigned int size; } drm_i915_mmio_entry_t; typedef struct drm_i915_mmio { unsigned int read_write:1; unsigned int reg:31; void __user *data; } drm_i915_mmio_t; typedef struct drm_i915_hws_addr { uint64_t addr; } drm_i915_hws_addr_t; /* * Relocation header is 4 uint32_ts * 0 - 32 bit reloc count * 1 - 32-bit relocation type * 2-3 - 64-bit user buffer handle ptr for another list of relocs. */ #define I915_RELOC_HEADER 4 /* * type 0 relocation has 4-uint32_t stride * 0 - offset into buffer * 1 - delta to add in * 2 - buffer handle * 3 - reserved (for optimisations later). */ /* * type 1 relocation has 4-uint32_t stride. * Hangs off the first item in the op list. * Performed after all valiations are done. * Try to group relocs into the same relocatee together for * performance reasons. * 0 - offset into buffer * 1 - delta to add in * 2 - buffer index in op list. * 3 - relocatee index in op list. */ #define I915_RELOC_TYPE_0 0 #define I915_RELOC0_STRIDE 4 #define I915_RELOC_TYPE_1 1 #define I915_RELOC1_STRIDE 4 struct drm_i915_op_arg { uint64_t next; uint64_t reloc_ptr; int handled; unsigned int pad64; union { struct drm_bo_op_req req; struct drm_bo_arg_rep rep; } d; }; struct drm_i915_execbuffer { uint64_t ops_list; uint32_t num_buffers; struct drm_i915_batchbuffer batch; drm_context_t context; /* for lockless use in the future */ struct drm_fence_arg fence_arg; }; struct drm_i915_gem_init { /** * Beginning offset in the GTT to be managed by the DRM memory * manager. */ uint64_t gtt_start; /** * Ending offset in the GTT to be managed by the DRM memory * manager. */ uint64_t gtt_end; }; struct drm_i915_gem_create { /** * Requested size for the object. * * The (page-aligned) allocated size for the object will be returned. */ uint64_t size; /** * Returned handle for the object. * * Object handles are nonzero. */ uint32_t handle; uint32_t pad; }; struct drm_i915_gem_pread { /** Handle for the object being read. */ uint32_t handle; uint32_t pad; /** Offset into the object to read from */ uint64_t offset; /** Length of data to read */ uint64_t size; /** Pointer to write the data into. */ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */ }; struct drm_i915_gem_pwrite { /** Handle for the object being written to. */ uint32_t handle; uint32_t pad; /** Offset into the object to write to */ uint64_t offset; /** Length of data to write */ uint64_t size; /** Pointer to read the data from. */ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */ }; struct drm_i915_gem_mmap { /** Handle for the object being mapped. */ uint32_t handle; uint32_t pad; /** Offset in the object to map. */ uint64_t offset; /** * Length of data to map. * * The value will be page-aligned. */ uint64_t size; /** Returned pointer the data was mapped at */ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */ }; struct drm_i915_gem_mmap_gtt { /** Handle for the object being mapped. */ uint32_t handle; uint32_t pad; /** * Fake offset to use for subsequent mmap call * * This is a fixed-size type for 32/64 compatibility. 
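 * The returned value is not a real file offset but a token that
 * userspace hands back to mmap(2) on the same DRM fd to obtain a
 * GTT mapping of the object, roughly (error handling omitted):
 *
 *     struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *     drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *     ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *         fd, arg.offset);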
*/ uint64_t offset; }; struct drm_i915_gem_set_domain { /** Handle for the object */ uint32_t handle; /** New read domains */ uint32_t read_domains; /** New write domain */ uint32_t write_domain; }; struct drm_i915_gem_sw_finish { /** Handle for the object */ uint32_t handle; }; struct drm_i915_gem_relocation_entry { /** * Handle of the buffer being pointed to by this relocation entry. * * It's appealing to make this be an index into the mm_validate_entry * list to refer to the buffer, but this allows the driver to create * a relocation list for state buffers and not re-write it per * exec using the buffer. */ uint32_t target_handle; /** * Value to be added to the offset of the target buffer to make up * the relocation entry. */ uint32_t delta; /** Offset in the buffer the relocation entry will be written into */ uint64_t offset; /** * Offset value of the target buffer that the relocation entry was last * written as. * * If the buffer has the same offset as last time, we can skip syncing * and writing the relocation. This value is written back out by * the execbuffer ioctl when the relocation is written. */ uint64_t presumed_offset; /** * Target memory domains read by this operation. */ uint32_t read_domains; /** * Target memory domains written by this operation. * * Note that only one domain may be written by the whole * execbuffer operation, so that where there are conflicts, * the application will get -EINVAL back. */ uint32_t write_domain; }; /** @{ * Intel memory domains * * Most of these just align with the various caches in * the system and are used to flush and invalidate as * objects end up cached in different domains. */ /** CPU cache */ #define I915_GEM_DOMAIN_CPU 0x00000001 /** Render cache, used by 2D and 3D drawing */ #define I915_GEM_DOMAIN_RENDER 0x00000002 /** Sampler cache, used by texture engine */ #define I915_GEM_DOMAIN_SAMPLER 0x00000004 /** Command queue, used to load batch buffers */ #define I915_GEM_DOMAIN_COMMAND 0x00000008 /** Instruction cache, used by shader programs */ #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 /** Vertex address cache */ #define I915_GEM_DOMAIN_VERTEX 0x00000020 /** GTT domain - aperture and scanout */ #define I915_GEM_DOMAIN_GTT 0x00000040 /** @} */ struct drm_i915_gem_exec_object { /** * User's handle for a buffer to be bound into the GTT for this * operation. */ uint32_t handle; /** Number of relocations to be performed on this buffer */ uint32_t relocation_count; /** * Pointer to array of struct drm_i915_gem_relocation_entry containing * the relocations to be performed in this buffer. */ uint64_t relocs_ptr; /** Required alignment in graphics aperture */ uint64_t alignment; /** * Returned value of the updated offset of the object, for future * presumed_offset writes. */ uint64_t offset; }; struct drm_i915_gem_execbuffer { /** * List of buffers to be validated with their relocations to be * performend on them. * * This is a pointer to an array of struct drm_i915_gem_validate_entry. * * These buffers must be listed in an order such that all relocations * a buffer is performing refer to buffers that have already appeared * in the validate list. */ uint64_t buffers_ptr; uint32_t buffer_count; /** Offset in the batchbuffer to start execution from. 
*/ uint32_t batch_start_offset; /** Bytes used in batchbuffer from batch_start_offset */ uint32_t batch_len; uint32_t DR1; uint32_t DR4; uint32_t num_cliprects; uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */ }; struct drm_i915_gem_exec_object2 { /** * User's handle for a buffer to be bound into the GTT for this * operation. */ uint32_t handle; /** Number of relocations to be performed on this buffer */ uint32_t relocation_count; /** * Pointer to array of struct drm_i915_gem_relocation_entry containing * the relocations to be performed in this buffer. */ uint64_t relocs_ptr; /** Required alignment in graphics aperture */ uint64_t alignment; /** * Returned value of the updated offset of the object, for future * presumed_offset writes. */ uint64_t offset; #define EXEC_OBJECT_NEEDS_FENCE (1<<0) uint64_t flags; uint64_t rsvd1; /* now used for context info */ uint64_t rsvd2; }; struct drm_i915_gem_execbuffer2 { /** * List of gem_exec_object2 structs */ uint64_t buffers_ptr; uint32_t buffer_count; /** Offset in the batchbuffer to start execution from. */ uint32_t batch_start_offset; /** Bytes used in batchbuffer from batch_start_offset */ uint32_t batch_len; uint32_t DR1; uint32_t DR4; uint32_t num_cliprects; /** This is a struct drm_clip_rect *cliprects */ uint64_t cliprects_ptr; #define I915_EXEC_RING_MASK (7<<0) #define I915_EXEC_DEFAULT (0<<0) #define I915_EXEC_RENDER (1<<0) #define I915_EXEC_BSD (2<<0) #define I915_EXEC_BLT (3<<0) /* Used for switching the constants addressing mode on gen4+ RENDER ring. * Gen6+ only supports relative addressing to dynamic state (default) and * absolute addressing. * * These flags are ignored for the BSD and BLT rings. */ #define I915_EXEC_CONSTANTS_MASK (3<<6) #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ uint64_t flags; uint64_t rsvd1; uint64_t rsvd2; }; /** Resets the SO write offset registers for transform feedback on gen7. */ #define I915_EXEC_GEN7_SOL_RESET (1<<8) #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK #define i915_execbuffer2_get_context_id(eb2) \ ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) struct drm_i915_gem_pin { /** Handle of the buffer to be pinned. */ uint32_t handle; uint32_t pad; /** alignment required within the aperture */ uint64_t alignment; /** Returned GTT offset of the buffer. */ uint64_t offset; }; struct drm_i915_gem_unpin { /** Handle of the buffer to be unpinned. */ uint32_t handle; uint32_t pad; }; struct drm_i915_gem_busy { /** Handle of the buffer to check for busy */ uint32_t handle; /** Return busy status (1 if busy, 0 if idle) */ uint32_t busy; }; #define I915_TILING_NONE 0 #define I915_TILING_X 1 #define I915_TILING_Y 2 #define I915_BIT_6_SWIZZLE_NONE 0 #define I915_BIT_6_SWIZZLE_9 1 #define I915_BIT_6_SWIZZLE_9_10 2 #define I915_BIT_6_SWIZZLE_9_11 3 #define I915_BIT_6_SWIZZLE_9_10_11 4 /* Not seen by userland */ #define I915_BIT_6_SWIZZLE_UNKNOWN 5 /* Seen by userland. */ #define I915_BIT_6_SWIZZLE_9_17 6 #define I915_BIT_6_SWIZZLE_9_10_17 7 struct drm_i915_gem_set_tiling { /** Handle of the buffer to have its tiling state updated */ uint32_t handle; /** * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, * I915_TILING_Y). * * This value is to be set on request, and will be updated by the * kernel on successful return with the actual chosen tiling layout. 
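 * A typical request for an X-tiled buffer therefore looks roughly like
 * (stride being the caller's bytes per row, error handling omitted):
 *
 *     struct drm_i915_gem_set_tiling st = {
 *             .handle = handle,
 *             .tiling_mode = I915_TILING_X,
 *             .stride = stride,
 *     };
 *     drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
 *
 * after which tiling_mode and swizzle_mode are read back to see what
 * was actually chosen.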
* * The tiling mode may be demoted to I915_TILING_NONE when the system * has bit 6 swizzling that can't be managed correctly by GEM. * * Buffer contents become undefined when changing tiling_mode. */ uint32_t tiling_mode; /** * Stride in bytes for the object when in I915_TILING_X or * I915_TILING_Y. */ uint32_t stride; /** * Returned address bit 6 swizzling required for CPU access through * mmap mapping. */ uint32_t swizzle_mode; }; struct drm_i915_gem_get_tiling { /** Handle of the buffer to get tiling state for. */ uint32_t handle; /** * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, * I915_TILING_Y). */ uint32_t tiling_mode; /** * Returned address bit 6 swizzling required for CPU access through * mmap mapping. */ uint32_t swizzle_mode; }; struct drm_i915_gem_get_aperture { /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ uint64_t aper_size; /** * Available space in the aperture used by i915_gem_execbuffer, in * bytes */ uint64_t aper_available_size; }; struct drm_i915_get_pipe_from_crtc_id { /** ID of CRTC being requested **/ uint32_t crtc_id; /** pipe of requested CRTC **/ uint32_t pipe; }; #define I915_MADV_WILLNEED 0 #define I915_MADV_DONTNEED 1 #define I915_MADV_PURGED_INTERNAL 2 /* internal state */ struct drm_i915_gem_madvise { /** Handle of the buffer to change the backing store advice */ uint32_t handle; /* Advice: either the buffer will be needed again in the near future, * or wont be and could be discarded under memory pressure. */ uint32_t madv; /** Whether the backing store still exists. */ uint32_t retained; }; #define I915_OVERLAY_TYPE_MASK 0xff #define I915_OVERLAY_YUV_PLANAR 0x01 #define I915_OVERLAY_YUV_PACKED 0x02 #define I915_OVERLAY_RGB 0x03 #define I915_OVERLAY_DEPTH_MASK 0xff00 #define I915_OVERLAY_RGB24 0x1000 #define I915_OVERLAY_RGB16 0x2000 #define I915_OVERLAY_RGB15 0x3000 #define I915_OVERLAY_YUV422 0x0100 #define I915_OVERLAY_YUV411 0x0200 #define I915_OVERLAY_YUV420 0x0300 #define I915_OVERLAY_YUV410 0x0400 #define I915_OVERLAY_SWAP_MASK 0xff0000 #define I915_OVERLAY_NO_SWAP 0x000000 #define I915_OVERLAY_UV_SWAP 0x010000 #define I915_OVERLAY_Y_SWAP 0x020000 #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 #define I915_OVERLAY_FLAGS_MASK 0xff000000 #define I915_OVERLAY_ENABLE 0x01000000 struct drm_intel_overlay_put_image { /* various flags and src format description */ uint32_t flags; /* source picture description */ uint32_t bo_handle; /* stride values and offsets are in bytes, buffer relative */ uint16_t stride_Y; /* stride for packed formats */ uint16_t stride_UV; uint32_t offset_Y; /* offset for packet formats */ uint32_t offset_U; uint32_t offset_V; /* in pixels */ uint16_t src_width; uint16_t src_height; /* to compensate the scaling factors for partially covered surfaces */ uint16_t src_scan_width; uint16_t src_scan_height; /* output crtc description */ uint32_t crtc_id; uint16_t dst_x; uint16_t dst_y; uint16_t dst_width; uint16_t dst_height; }; /* flags */ #define I915_OVERLAY_UPDATE_ATTRS (1<<0) #define I915_OVERLAY_UPDATE_GAMMA (1<<1) struct drm_intel_overlay_attrs { uint32_t flags; uint32_t color_key; int32_t brightness; uint32_t contrast; uint32_t saturation; uint32_t gamma0; uint32_t gamma1; uint32_t gamma2; uint32_t gamma3; uint32_t gamma4; uint32_t gamma5; }; /* * Intel sprite handling * * Color keying works with a min/mask/max tuple. Both source and destination * color keying is allowed. 
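 * As a rough sketch (see the per-mode descriptions below), making pure
 * black sprite pixels transparent on an RGB surface could be requested
 * with something like the following, where plane is the caller's sprite
 * plane id and error handling is omitted:
 *
 *     struct drm_intel_sprite_colorkey ck = {
 *             .plane_id = plane,
 *             .min_value = 0x000000,
 *             .channel_mask = 0xffffff,
 *             .max_value = 0x000000,
 *             .flags = I915_SET_COLORKEY_SOURCE,
 *     };
 *     drmIoctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ck);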
* * Source keying: * Sprite pixels within the min & max values, masked against the color channels * specified in the mask field, will be transparent. All other pixels will * be displayed on top of the primary plane. For RGB surfaces, only the min * and mask fields will be used; ranged compares are not allowed. * * Destination keying: * Primary plane pixels that match the min value, masked against the color * channels specified in the mask field, will be replaced by corresponding * pixels from the sprite plane. * * Note that source & destination keying are exclusive; only one can be * active on a given plane. */ #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ #define I915_SET_COLORKEY_DESTINATION (1<<1) #define I915_SET_COLORKEY_SOURCE (1<<2) struct drm_intel_sprite_colorkey { uint32_t plane_id; uint32_t min_value; uint32_t channel_mask; uint32_t max_value; uint32_t flags; }; struct drm_i915_gem_context_create { /* output: id of new context*/ uint32_t ctx_id; uint32_t pad; }; struct drm_i915_gem_context_destroy { uint32_t ctx_id; uint32_t pad; }; #endif /* _I915_DRM_H_ */ Index: head/sys/dev/drm2/i915/i915_drv.c =================================================================== --- head/sys/dev/drm2/i915/i915_drv.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_drv.c (revision 277487) @@ -1,870 +1,994 @@ /* i915_drv.c -- Intel i915 driver -*- linux-c -*- * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com */ /*- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include "fb_if.h" /* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. 
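 * The resulting table is what the newbus probe path matches against:
 * i915_probe() below simply hands it to drm_probe(), and i915_attach()
 * passes the same list to drm_attach().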
*/ static drm_pci_id_list_t i915_pciidlist[] = { i915_PCI_IDS }; static const struct intel_device_info intel_i830_info = { .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_845g_info = { .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i85x_info = { .gen = 2, .is_i85x = 1, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i865g_info = { .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915g_info = { .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { .gen = 3, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, }; static const struct intel_device_info intel_i945g_info = { .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { .gen = 3, .is_i945gm = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, }; static const struct intel_device_info intel_i965g_info = { .gen = 4, .is_broadwater = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_i965gm_info = { .gen = 4, .is_crestline = 1, .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, .has_overlay = 1, .supports_tv = 1, }; static const struct intel_device_info intel_g33_info = { .gen = 3, .is_g33 = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_g45_info = { .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_gm45_info = { .gen = 4, .is_g4x = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .supports_tv = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_pineview_info = { .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_ironlake_m_info = { .gen = 5, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, /* disabled due to buggy hardware */ .has_bsd_ring = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_ivybridge_d_info = { .is_ivybridge = 1, .gen = 7, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { .is_ivybridge = 1, .gen = 7, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet 
*/ .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_pch_split = 1, }; +#if 0 +static const struct intel_device_info intel_valleyview_m_info = { + .gen = 7, .is_mobile = 1, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 0, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .is_valleyview = 1, +}; + +static const struct intel_device_info intel_valleyview_d_info = { + .gen = 7, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 0, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .is_valleyview = 1, +}; +#endif + +static const struct intel_device_info intel_haswell_d_info = { + .is_haswell = 1, .gen = 7, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .has_llc = 1, + .has_pch_split = 1, +}; + +static const struct intel_device_info intel_haswell_m_info = { + .is_haswell = 1, .gen = 7, .is_mobile = 1, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .has_llc = 1, + .has_pch_split = 1, +}; + #define INTEL_VGA_DEVICE(id, info_) { \ .device = id, \ .info = info_, \ } static const struct intel_gfx_device_id { int device; const struct intel_device_info *info; } pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x3577, &intel_i830_info), INTEL_VGA_DEVICE(0x2562, &intel_845g_info), INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ + INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ + INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ + 
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ + INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ {0, 0} }; static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv; int error; dev_priv = dev->dev_private; drm_kms_helper_poll_disable(dev); #if 0 pci_save_state(dev->pdev); #endif DRM_LOCK(dev); /* If KMS is active, we do the leavevt stuff here */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { error = -i915_gem_idle(dev); if (error) { DRM_UNLOCK(dev); device_printf(dev->device, "GEM idle failed, resume might fail\n"); return (error); } drm_irq_uninstall(dev); } i915_save_state(dev); intel_opregion_fini(dev); /* Modeset on resume, not lid events */ dev_priv->modeset_on_lid = 0; DRM_UNLOCK(dev); return 0; } static int i915_suspend(device_t kdev) { struct drm_device *dev; int error; dev = device_get_softc(kdev); if (dev == NULL || dev->dev_private == NULL) { DRM_ERROR("DRM not initialized, aborting suspend.\n"); return -ENODEV; } DRM_DEBUG_KMS("starting suspend\n"); error = i915_drm_freeze(dev); if (error) return (error); error = bus_generic_suspend(kdev); DRM_DEBUG_KMS("finished suspend %d\n", error); return (error); } static int i915_drm_thaw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; DRM_LOCK(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { i915_gem_restore_gtt_mappings(dev); } i915_restore_state(dev); intel_opregion_setup(dev); /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { - dev_priv->mm.suspended = 0; - - error = i915_gem_init_hw(dev); - if (HAS_PCH_SPLIT(dev)) ironlake_init_pch_refclk(dev); + dev_priv->mm.suspended = 0; + + error = i915_gem_init_hw(dev); DRM_UNLOCK(dev); + + intel_modeset_init_hw(dev); sx_xlock(&dev->mode_config.mutex); drm_mode_config_reset(dev); sx_xunlock(&dev->mode_config.mutex); drm_irq_install(dev); sx_xlock(&dev->mode_config.mutex); /* Resume the modeset for every activated CRTC */ drm_helper_resume_force_mode(dev); sx_xunlock(&dev->mode_config.mutex); - - if (IS_IRONLAKE_M(dev)) - ironlake_enable_rc6(dev); DRM_LOCK(dev); } intel_opregion_init(dev); dev_priv->modeset_on_lid = 0; DRM_UNLOCK(dev); return error; } static int i915_resume(device_t kdev) { struct drm_device *dev; int ret; dev = device_get_softc(kdev); DRM_DEBUG_KMS("starting resume\n"); #if 0 if (pci_enable_device(dev->pdev)) return -EIO; pci_set_master(dev->pdev); #endif ret = -i915_drm_thaw(dev); if (ret != 0) return (ret); drm_kms_helper_poll_enable(dev); ret = bus_generic_resume(kdev); DRM_DEBUG_KMS("finished resume %d\n", ret); return (ret); } static int i915_probe(device_t kdev) { return drm_probe(kdev, i915_pciidlist); } int i915_modeset; static int i915_attach(device_t kdev) { struct drm_device *dev; dev = device_get_softc(kdev); if (i915_modeset == 1) i915_driver_info.driver_features |= DRIVER_MODESET; dev->driver = &i915_driver_info; return (drm_attach(kdev, i915_pciidlist)); } static struct fb_info * i915_fb_helper_getinfo(device_t kdev) { struct intel_fbdev *ifbdev; drm_i915_private_t *dev_priv; struct drm_device *dev; struct fb_info *info; dev = device_get_softc(kdev); dev_priv = dev->dev_private; ifbdev = dev_priv->fbdev; if (ifbdev == NULL) return (NULL); info = ifbdev->helper.fbdev; return (info); } const struct intel_device_info * i915_get_device_id(int device) { const struct intel_gfx_device_id 
*did; for (did = &pciidlist[0]; did->device != 0; did++) { if (did->device != device) continue; return (did->info); } return (NULL); } static device_method_t i915_methods[] = { /* Device interface */ DEVMETHOD(device_probe, i915_probe), DEVMETHOD(device_attach, i915_attach), DEVMETHOD(device_suspend, i915_suspend), DEVMETHOD(device_resume, i915_resume), DEVMETHOD(device_detach, drm_detach), /* Framebuffer service methods */ DEVMETHOD(fb_getinfo, i915_fb_helper_getinfo), DEVMETHOD_END }; static driver_t i915_driver = { "drmn", i915_methods, sizeof(struct drm_device) }; extern devclass_t drm_devclass; DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0, SI_ORDER_ANY); MODULE_DEPEND(i915kms, drmn, 1, 1, 1); MODULE_DEPEND(i915kms, agp, 1, 1, 1); MODULE_DEPEND(i915kms, iicbus, 1, 1, 1); MODULE_DEPEND(i915kms, iic, 1, 1, 1); MODULE_DEPEND(i915kms, iicbb, 1, 1, 1); int intel_iommu_enabled = 0; TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled); +int intel_iommu_gfx_mapped = 0; +TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped); +int i915_prefault_disable; +TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable); int i915_semaphores = -1; TUNABLE_INT("drm.i915.semaphores", &i915_semaphores); static int i915_try_reset = 1; TUNABLE_INT("drm.i915.try_reset", &i915_try_reset); unsigned int i915_lvds_downclock = 0; TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock); int i915_vbt_sdvo_panel_type = -1; TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type); unsigned int i915_powersave = 1; TUNABLE_INT("drm.i915.powersave", &i915_powersave); int i915_enable_fbc = 0; TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc); int i915_enable_rc6 = 0; TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6); +int i915_lvds_channel_mode; +TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode); int i915_panel_use_ssc = -1; TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc); int i915_panel_ignore_lid = 0; TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid); +int i915_panel_invert_brightness; +TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness); int i915_modeset = 1; TUNABLE_INT("drm.i915.modeset", &i915_modeset); int i915_enable_ppgtt = -1; TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt); int i915_enable_hangcheck = 1; TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck); #define PCI_VENDOR_INTEL 0x8086 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 +#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 -void -intel_detect_pch(struct drm_device *dev) +void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv; device_t pch; uint32_t id; dev_priv = dev->dev_private; pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA); if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) { id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_IBX; + dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_CPT; + dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ dev_priv->pch_type = PCH_CPT; + dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found PatherPoint PCH\n"); + } else if (id == 
INTEL_PCH_LPT_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_LPT; + dev_priv->num_pch_pll = 0; + DRM_DEBUG_KMS("Found LynxPoint PCH\n"); } else DRM_DEBUG_KMS("No PCH detected\n"); + KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS, + ("num_pch_pll %d\n", dev_priv->num_pch_pll)); } else DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n"); } +bool i915_semaphore_is_enabled(struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen < 6) + return 0; + + if (i915_semaphores >= 0) + return i915_semaphores; + + /* Enable semaphores on SNB when IO remapping is off */ + if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) + return false; + + return 1; +} + void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { int count; count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) DELAY(10); I915_WRITE_NOTRACE(FORCEWAKE, 1); POSTING_READ(FORCEWAKE); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) DELAY(10); } void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { int count; count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) DELAY(10); - I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1); + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); POSTING_READ(FORCEWAKE_MT); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) DELAY(10); } void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { mtx_lock(&dev_priv->gt_lock); if (dev_priv->forcewake_count++ == 0) dev_priv->display.force_wake_get(dev_priv); mtx_unlock(&dev_priv->gt_lock); } static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) { u32 gtfifodbg; gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) { printf("MMIO read or write has been dropped %x\n", gtfifodbg); I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); } } void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { mtx_lock(&dev_priv->gt_lock); if (--dev_priv->forcewake_count == 0) dev_priv->display.force_wake_put(dev_priv); mtx_unlock(&dev_priv->gt_lock); } int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) { int ret = 0; if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { int loop = 500; u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { DELAY(10); fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); } if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) { printf("%s loop\n", __func__); ++ret; } dev_priv->gt_fifo_count = fifo; } dev_priv->gt_fifo_count--; return (ret); } +void vlv_force_wake_get(struct drm_i915_private *dev_priv) +{ + int count; + + count = 0; + + /* Already awake? 
*/ + if ((I915_READ(0x130094) & 0xa1) == 0xa1) + return; + + I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); + POSTING_READ(FORCEWAKE_VLV); + + count = 0; + while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0) + DELAY(10); +} + +void vlv_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); + /* FIXME: confirm VLV behavior with Punit folks */ + POSTING_READ(FORCEWAKE_VLV); +} + static int i8xx_do_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int onems; if (IS_I85X(dev)) return -ENODEV; onems = hz / 1000; if (onems == 0) onems = 1; I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); POSTING_READ(D_STATE); if (IS_I830(dev) || IS_845G(dev)) { I915_WRITE(DEBUG_RESET_I830, DEBUG_RESET_DISPLAY | DEBUG_RESET_RENDER | DEBUG_RESET_FULL); POSTING_READ(DEBUG_RESET_I830); pause("i8xxrst1", onems); I915_WRITE(DEBUG_RESET_I830, 0); POSTING_READ(DEBUG_RESET_I830); } pause("i8xxrst2", onems); I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); POSTING_READ(D_STATE); return 0; } static int i965_reset_complete(struct drm_device *dev) { u8 gdrst; gdrst = pci_read_config(dev->device, I965_GDRST, 1); - return (gdrst & 0x1); + return (gdrst & GRDOM_RESET_ENABLE) == 0; } static int i965_do_reset(struct drm_device *dev) { + int ret; u8 gdrst; /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as * well as the reset bit (GR/bit 0). Setting the GR bit * triggers the reset; when done, the hardware will clear it. */ gdrst = pci_read_config(dev->device, I965_GDRST, 1); pci_write_config(dev->device, I965_GDRST, gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1); - return (_intel_wait_for(dev, i965_reset_complete(dev), 500, 1, - "915rst")); + ret = wait_for(i965_reset_complete(dev), 500); + if (ret) + return ret; + + /* We can't reset render&media without also resetting display ... */ + gdrst = pci_read_config(dev->device, I965_GDRST, 1); + pci_write_config(dev->device, I965_GDRST, + gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1); + + return wait_for(i965_reset_complete(dev), 500); } static int ironlake_do_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv; u32 gdrst; + int ret; dev_priv = dev->dev_private; gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, - gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); - return (_intel_wait_for(dev, - (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1) != 0, - 500, 1, "915rst")); + gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); + ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); + if (ret) + return ret; + + /* We can't reset render&media without also resetting display ... 
*/ + gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); + I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, + gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); + return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); } static int gen6_do_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv; int ret; dev_priv = dev->dev_private; /* Hold gt_lock across reset to prevent any register access * with forcewake not set correctly */ mtx_lock(&dev_priv->gt_lock); /* Reset the chip */ /* GEN6_GDRST is not in the gt power well, no need to check * for fifo space for the write or forcewake the chip for * the read */ I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); /* Spin waiting for the device to ack the reset request */ ret = _intel_wait_for(dev, (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, - 500, 1, "915rst"); + 500, 0, "915rst"); /* If reset with a user forcewake, try to restore, otherwise turn it off */ if (dev_priv->forcewake_count) dev_priv->display.force_wake_get(dev_priv); else dev_priv->display.force_wake_put(dev_priv); /* Restore fifo count */ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); mtx_unlock(&dev_priv->gt_lock); return (ret); } -int intel_gpu_reset(struct drm_device *dev) +int +intel_gpu_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret = -ENODEV; switch (INTEL_INFO(dev)->gen) { case 7: case 6: ret = gen6_do_reset(dev); break; case 5: ret = ironlake_do_reset(dev); break; case 4: ret = i965_do_reset(dev); break; case 2: ret = i8xx_do_reset(dev); break; } /* Also reset the gpu hangman. */ if (dev_priv->stop_rings) { DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n"); dev_priv->stop_rings = 0; if (ret == -ENODEV) { DRM_ERROR("Reset not implemented, but ignoring " "error for simulated gpu hangs\n"); ret = 0; } } return ret; } -int -i915_reset(struct drm_device *dev) +int i915_reset(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - /* - * We really should only reset the display subsystem if we actually - * need to - */ - bool need_display = true; int ret; if (!i915_try_reset) return (0); if (!sx_try_xlock(&dev->dev_struct_lock)) return (-EBUSY); + dev_priv->stop_rings = 0; + i915_gem_reset(dev); ret = -ENODEV; - if (time_second - dev_priv->last_gpu_reset < 5) { + if (time_second - dev_priv->last_gpu_reset < 5) DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); - } else + else ret = intel_gpu_reset(dev); dev_priv->last_gpu_reset = time_second; if (ret) { DRM_ERROR("Failed to reset chip.\n"); DRM_UNLOCK(dev); return (ret); } if (drm_core_check_feature(dev, DRIVER_MODESET) || !dev_priv->mm.suspended) { + struct intel_ring_buffer *ring; + int i; + dev_priv->mm.suspended = 0; i915_gem_init_swizzling(dev); - dev_priv->rings[RCS].init(&dev_priv->rings[RCS]); - if (HAS_BSD(dev)) - dev_priv->rings[VCS].init(&dev_priv->rings[VCS]); - if (HAS_BLT(dev)) - dev_priv->rings[BCS].init(&dev_priv->rings[BCS]); + for_each_ring(ring, dev_priv, i) + ring->init(ring); i915_gem_context_init(dev); i915_gem_init_ppgtt(dev); + DRM_UNLOCK(dev); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_modeset_init_hw(dev); + + DRM_LOCK(dev); drm_irq_uninstall(dev); - drm_mode_config_reset(dev); DRM_UNLOCK(dev); drm_irq_install(dev); - DRM_LOCK(dev); - } - DRM_UNLOCK(dev); + } else + DRM_UNLOCK(dev); - if (need_display) { - sx_xlock(&dev->mode_config.mutex); - drm_helper_resume_force_mode(dev); - sx_xunlock(&dev->mode_config.mutex); - } - return (0); } + +/* We give fast paths for the really 
cool registers */ +#define NEEDS_FORCE_WAKE(dev_priv, reg) \ + (((dev_priv)->info->gen >= 6) && \ + ((reg) < 0x40000) && \ + ((reg) != FORCEWAKE)) && \ + (!IS_VALLEYVIEW((dev_priv)->dev)) #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ mtx_lock(&dev_priv->gt_lock); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_get(dev_priv); \ val = DRM_READ##y(dev_priv->mmio_map, reg); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_put(dev_priv); \ mtx_unlock(&dev_priv->gt_lock); \ } else { \ val = DRM_READ##y(dev_priv->mmio_map, reg); \ } \ trace_i915_reg_rw(false, reg, val, sizeof(val)); \ return val; \ } __i915_read(8, 8) __i915_read(16, 16) __i915_read(32, 32) __i915_read(64, 64) #undef __i915_read #define __i915_write(x, y) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ u32 __fifo_ret = 0; \ trace_i915_reg_rw(true, reg, val, sizeof(val)); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ DRM_WRITE##y(dev_priv->mmio_map, reg, val); \ if (__predict_false(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ } __i915_write(8, 8) __i915_write(16, 16) __i915_write(32, 32) __i915_write(64, 64) #undef __i915_write Index: head/sys/dev/drm2/i915/i915_drv.h =================================================================== --- head/sys/dev/drm2/i915/i915_drv.h (revision 277486) +++ head/sys/dev/drm2/i915/i915_drv.h (revision 277487) @@ -1,1524 +1,1574 @@ /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- */ /* * * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include __FBSDID("$FreeBSD$"); #ifndef _I915_DRV_H_ #define _I915_DRV_H_ #include #include #include #include #include /* General customization: */ #define DRIVER_AUTHOR "Tungsten Graphics, Inc." 
#define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" #define DRIVER_DATE "20080730" MALLOC_DECLARE(DRM_I915_GEM); enum pipe { PIPE_A = 0, PIPE_B, PIPE_C, I915_MAX_PIPES }; #define pipe_name(p) ((p) + 'A') #define I915_NUM_PIPE 2 enum plane { PLANE_A = 0, PLANE_B, PLANE_C, }; #define plane_name(p) ((p) + 'A') -#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) +enum port { + PORT_A = 0, + PORT_B, + PORT_C, + PORT_D, + PORT_E, + I915_MAX_PORTS +}; +#define port_name(p) ((p) + 'A') +#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + + #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) +struct intel_pch_pll { + int refcount; /* count of number of CRTCs sharing this PLL */ + int active; /* count of number of active CRTCs (i.e. DPMS on) */ + bool on; /* is the PLL actually active? Disabled during modeset */ + int pll_reg; + int fp0_reg; + int fp1_reg; +}; +#define I915_NUM_PLLS 2 + /* Interface history: * * 1.1: Original. * 1.2: Add Power Management * 1.3: Add vblank support * 1.4: Fix cmdbuffer path, add heap destroy * 1.5: Add vblank pipe configuration * 1.6: - New ioctl for scheduling buffer swaps on vertical blank * - Support vertical blank on secondary display pipe */ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 6 #define DRIVER_PATCHLEVEL 0 #define WATCH_COHERENCY 0 #define WATCH_BUF 0 #define WATCH_EXEC 0 #define WATCH_LRU 0 #define WATCH_RELOC 0 #define WATCH_INACTIVE 0 #define WATCH_PWRITE 0 #define I915_GEM_PHYS_CURSOR_0 1 #define I915_GEM_PHYS_CURSOR_1 2 #define I915_GEM_PHYS_OVERLAY_REGS 3 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) struct drm_i915_gem_phys_object { int id; drm_dma_handle_t *handle; struct drm_i915_gem_object *cur_obj; }; struct drm_i915_private; struct drm_i915_display_funcs { void (*dpms)(struct drm_crtc *crtc, int mode); bool (*fbc_enabled)(struct drm_device *dev); void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); void (*disable_fbc)(struct drm_device *dev); int (*get_display_clock_speed)(struct drm_device *dev); int (*get_fifo_size)(struct drm_device *dev, int plane); void (*update_wm)(struct drm_device *dev); void (*update_sprite_wm)(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size); + void (*sanitize_pm)(struct drm_device *dev); + void (*update_linetime_wm)(struct drm_device *dev, int pipe, + struct drm_display_mode *mode); int (*crtc_mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb); + void (*off)(struct drm_crtc *crtc); void (*write_eld)(struct drm_connector *connector, struct drm_crtc *crtc); void (*fdi_link_train)(struct drm_crtc *crtc); void (*init_clock_gating)(struct drm_device *dev); void (*init_pch_clock_gating)(struct drm_device *dev); int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj); void (*force_wake_get)(struct drm_i915_private *dev_priv); void (*force_wake_put)(struct drm_i915_private *dev_priv); int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ /* display clock increase/decrease */ /* pll clock increase/decrease */ }; struct intel_device_info { u8 gen; u8 is_mobile:1; u8 is_i85x:1; u8 is_i915g:1; u8 is_i945gm:1; u8 is_g33:1; u8 need_gfx_hws:1; u8 is_g4x:1; u8 is_pineview:1; u8 is_broadwater:1; u8 is_crestline:1; 
u8 is_ivybridge:1; + u8 is_valleyview:1; + u8 has_pch_split:1; + u8 is_haswell:1; u8 has_fbc:1; u8 has_pipe_cxsr:1; u8 has_hotplug:1; u8 cursor_needs_physical:1; u8 has_overlay:1; u8 overlay_needs_physical:1; u8 supports_tv:1; u8 has_bsd_ring:1; u8 has_blt_ring:1; u8 has_llc:1; }; #define I915_PPGTT_PD_ENTRIES 512 #define I915_PPGTT_PT_ENTRIES 1024 struct i915_hw_ppgtt { unsigned num_pd_entries; vm_page_t *pt_pages; uint32_t pd_offset; vm_paddr_t *pt_dma_addr; vm_paddr_t scratch_page_dma_addr; }; /* This must match up with the value previously used for execbuf2.rsvd1. */ #define DEFAULT_CONTEXT_ID 0 struct i915_hw_context { uint32_t id; bool is_initialized; struct drm_i915_file_private *file_priv; struct intel_ring_buffer *ring; struct drm_i915_gem_object *obj; }; enum no_fbc_reason { FBC_NO_OUTPUT, /* no outputs enabled to compress */ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ FBC_MODE_TOO_LARGE, /* mode too large for compression */ FBC_BAD_PLANE, /* fbc not supported on plane */ FBC_NOT_TILED, /* buffer not tiled */ FBC_MULTIPLE_PIPES, /* more than one pipe active */ FBC_MODULE_PARAM, }; struct mem_block { struct mem_block *next; struct mem_block *prev; int start; int size; struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ }; struct opregion_header; struct opregion_acpi; struct opregion_swsci; struct opregion_asle; struct intel_opregion { struct opregion_header *header; struct opregion_acpi *acpi; struct opregion_swsci *swsci; struct opregion_asle *asle; void *vbt; u32 *lid_state; }; #define OPREGION_SIZE (8*1024) #define I915_FENCE_REG_NONE -1 #define I915_MAX_NUM_FENCES 16 /* 16 fences + sign bit for FENCE_REG_NONE */ #define I915_MAX_NUM_FENCE_BITS 5 struct drm_i915_fence_reg { struct list_head lru_list; struct drm_i915_gem_object *obj; - uint32_t setup_seqno; int pin_count; }; struct sdvo_device_mapping { u8 initialized; u8 dvo_port; u8 slave_addr; u8 dvo_wiring; u8 i2c_pin; u8 ddc_pin; }; enum intel_pch { PCH_IBX, /* Ibexpeak PCH */ PCH_CPT, /* Cougarpoint PCH */ + PCH_LPT, /* Lynxpoint PCH */ }; #define QUIRK_PIPEA_FORCE (1<<0) #define QUIRK_LVDS_SSC_DISABLE (1<<1) +#define QUIRK_INVERT_BRIGHTNESS (1<<2) struct intel_fbdev; struct intel_fbc_work; typedef struct drm_i915_private { struct drm_device *dev; - device_t *gmbus_bridge; - device_t *bbbus_bridge; - device_t *gmbus; - device_t *bbbus; + device_t gmbus_bridge[GMBUS_NUM_PORTS + 1]; + device_t bbbus_bridge[GMBUS_NUM_PORTS + 1]; + device_t gmbus[GMBUS_NUM_PORTS + 1]; + device_t bbbus[GMBUS_NUM_PORTS + 1]; /** gmbus_sx protects against concurrent usage of the single hw gmbus * controller on different i2c buses. */ struct sx gmbus_sx; + uint32_t gpio_mmio_base; - int has_gem; int relative_constants_mode; drm_local_map_t *sarea; drm_local_map_t *mmio_map; /** gt_fifo_count and the subsequent register write are synchronized * with dev->struct_mutex. */ unsigned gt_fifo_count; /** forcewake_count is protected by gt_lock */ unsigned forcewake_count; /** gt_lock is also taken in irq contexts. 
*/ struct mtx gt_lock; drm_i915_sarea_t *sarea_priv; /* drm_i915_ring_buffer_t ring; */ struct intel_ring_buffer rings[I915_NUM_RINGS]; uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; void *hw_status_page; dma_addr_t dma_status_page; uint32_t counter; unsigned int status_gfx_addr; - drm_local_map_t hws_map; struct drm_gem_object *hws_obj; struct drm_i915_gem_object *pwrctx; struct drm_i915_gem_object *renderctx; unsigned int cpp; int back_offset; int front_offset; int current_page; int page_flipping; atomic_t irq_received; u32 trace_irq_seqno; /** Cached value of IER to avoid reads in updating the bitfield */ u32 pipestat[2]; u32 irq_mask; u32 gt_irq_mask; u32 pch_irq_mask; struct mtx irq_lock; + struct mtx dpio_lock; + u32 hotplug_supported_mask; - int tex_lru_log_granularity; - int allow_batchbuffer; unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; - int vblank_pipe; int num_pipe; + int num_pch_pll; /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000) int hangcheck_count; - uint32_t last_acthd; - uint32_t last_acthd_bsd; - uint32_t last_acthd_blt; + uint32_t last_acthd[I915_NUM_RINGS]; uint32_t last_instdone; uint32_t last_instdone1; + unsigned int stop_rings; + struct intel_opregion opregion; /* overlay */ struct intel_overlay *overlay; bool sprite_scaling_enabled; /* LVDS info */ int backlight_level; /* restore backlight to this value */ bool backlight_enabled; struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ /* Feature bits from the VBIOS */ unsigned int int_tv_support:1; unsigned int lvds_dither:1; unsigned int lvds_vbt:1; unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; unsigned int display_clock_mode:1; int lvds_ssc_freq; + unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ + unsigned int lvds_val; /* used for checking LVDS channel mode */ struct { int rate; int lanes; int preemphasis; int vswing; bool initialized; bool support; int bpp; struct edp_power_seq pps; } edp; bool no_aux_handshake; int crt_ddc_pin; struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ /* PCH chipset type */ enum intel_pch pch_type; /* Display functions */ struct drm_i915_display_funcs display; unsigned long quirks; /* Register state */ bool modeset_on_lid; u8 saveLBB; u32 saveDSPACNTR; u32 saveDSPBCNTR; u32 saveDSPARB; u32 saveHWS; u32 savePIPEACONF; u32 savePIPEBCONF; u32 savePIPEASRC; u32 savePIPEBSRC; u32 saveFPA0; u32 saveFPA1; u32 saveDPLL_A; u32 saveDPLL_A_MD; u32 saveHTOTAL_A; u32 saveHBLANK_A; u32 saveHSYNC_A; u32 saveVTOTAL_A; u32 saveVBLANK_A; u32 saveVSYNC_A; u32 saveBCLRPAT_A; u32 saveTRANSACONF; u32 saveTRANS_HTOTAL_A; u32 saveTRANS_HBLANK_A; u32 saveTRANS_HSYNC_A; u32 saveTRANS_VTOTAL_A; u32 saveTRANS_VBLANK_A; u32 saveTRANS_VSYNC_A; u32 savePIPEASTAT; u32 saveDSPASTRIDE; u32 saveDSPASIZE; u32 saveDSPAPOS; u32 saveDSPAADDR; u32 saveDSPASURF; u32 saveDSPATILEOFF; u32 savePFIT_PGM_RATIOS; u32 saveBLC_HIST_CTL; u32 saveBLC_PWM_CTL; u32 saveBLC_PWM_CTL2; u32 saveBLC_CPU_PWM_CTL; u32 saveBLC_CPU_PWM_CTL2; u32 saveFPB0; u32 saveFPB1; u32 saveDPLL_B; u32 saveDPLL_B_MD; u32 saveHTOTAL_B; u32 saveHBLANK_B; u32 saveHSYNC_B; u32 saveVTOTAL_B; u32 saveVBLANK_B; u32 saveVSYNC_B; u32 saveBCLRPAT_B; u32 saveTRANSBCONF; u32 saveTRANS_HTOTAL_B; u32 saveTRANS_HBLANK_B; u32 saveTRANS_HSYNC_B; u32 saveTRANS_VTOTAL_B; u32 
saveTRANS_VBLANK_B; u32 saveTRANS_VSYNC_B; u32 savePIPEBSTAT; u32 saveDSPBSTRIDE; u32 saveDSPBSIZE; u32 saveDSPBPOS; u32 saveDSPBADDR; u32 saveDSPBSURF; u32 saveDSPBTILEOFF; u32 saveVGA0; u32 saveVGA1; u32 saveVGA_PD; u32 saveVGACNTRL; u32 saveADPA; u32 saveLVDS; u32 savePP_ON_DELAYS; u32 savePP_OFF_DELAYS; u32 saveDVOA; u32 saveDVOB; u32 saveDVOC; u32 savePP_ON; u32 savePP_OFF; u32 savePP_CONTROL; u32 savePP_DIVISOR; u32 savePFIT_CONTROL; u32 save_palette_a[256]; u32 save_palette_b[256]; u32 saveDPFC_CB_BASE; u32 saveFBC_CFB_BASE; u32 saveFBC_LL_BASE; u32 saveFBC_CONTROL; u32 saveFBC_CONTROL2; u32 saveIER; u32 saveIIR; u32 saveIMR; u32 saveDEIER; u32 saveDEIMR; u32 saveGTIER; u32 saveGTIMR; u32 saveFDI_RXA_IMR; u32 saveFDI_RXB_IMR; u32 saveCACHE_MODE_0; u32 saveMI_ARB_STATE; u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF2[3]; u8 saveMSR; u8 saveSR[8]; u8 saveGR[25]; u8 saveAR_INDEX; u8 saveAR[21]; u8 saveDACMASK; u8 saveCR[37]; uint64_t saveFENCE[I915_MAX_NUM_FENCES]; u32 saveCURACNTR; u32 saveCURAPOS; u32 saveCURABASE; u32 saveCURBCNTR; u32 saveCURBPOS; u32 saveCURBBASE; u32 saveCURSIZE; u32 saveDP_B; u32 saveDP_C; u32 saveDP_D; u32 savePIPEA_GMCH_DATA_M; u32 savePIPEB_GMCH_DATA_M; u32 savePIPEA_GMCH_DATA_N; u32 savePIPEB_GMCH_DATA_N; u32 savePIPEA_DP_LINK_M; u32 savePIPEB_DP_LINK_M; u32 savePIPEA_DP_LINK_N; u32 savePIPEB_DP_LINK_N; u32 saveFDI_RXA_CTL; u32 saveFDI_TXA_CTL; u32 saveFDI_RXB_CTL; u32 saveFDI_TXB_CTL; u32 savePFA_CTL_1; u32 savePFB_CTL_1; u32 savePFA_WIN_SZ; u32 savePFB_WIN_SZ; u32 savePFA_WIN_POS; u32 savePFB_WIN_POS; u32 savePCH_DREF_CONTROL; u32 saveDISP_ARB_CTL; u32 savePIPEA_DATA_M1; u32 savePIPEA_DATA_N1; u32 savePIPEA_LINK_M1; u32 savePIPEA_LINK_N1; u32 savePIPEB_DATA_M1; u32 savePIPEB_DATA_N1; u32 savePIPEB_LINK_M1; u32 savePIPEB_LINK_N1; u32 saveMCHBAR_RENDER_STANDBY; u32 savePCH_PORT_HOTPLUG; struct { /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; /** Memory allocator for GTT */ struct drm_mm gtt_space; /** List of all objects in gtt_space. Used to restore gtt * mappings on resume */ struct list_head gtt_list; /** Usable portion of the GTT for GEM */ unsigned long gtt_start; unsigned long gtt_mappable_end; unsigned long gtt_end; /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; /** * List of objects currently involved in rendering from the * ringbuffer. * * Includes buffers having the contents of their GPU caches * flushed, not necessarily primitives. last_rendering_seqno * represents when the rendering involved will be completed. * * A reference is held on the buffer while on this list. */ struct list_head active_list; /** * List of objects which are not in the ringbuffer but which * still have a write_domain which needs to be flushed before * unbinding. * * A reference is held on the buffer while on this list. */ struct list_head flushing_list; /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. * * last_rendering_seqno is 0 while an object is in this list. * * A reference is not held on the buffer while on this list, * as merely being GTT-bound shouldn't prevent its being * freed, and we'll pull it off the list in the free path. */ struct list_head inactive_list; - /** - * LRU list of objects which are not in the ringbuffer but - * are still pinned in the GTT. - */ - struct list_head pinned_list; - /** LRU list of objects with fence regs on them. */ struct list_head fence_list; /** - * List of objects currently pending being freed. 
- * - * These objects are no longer in use, but due to a signal - * we were prevented from freeing them at the appointed time. - */ - struct list_head deferred_free_list; - - /** * We leave the user IRQ off as much as possible, * but this means that requests will finish and never * be retired once the system goes idle. Set a timer to * fire periodically while the ring is running. When it * fires, go retire requests. */ struct timeout_task retire_task; /** * Are we in a non-interruptible section of code like * modesetting? */ bool interruptible; uint32_t next_gem_seqno; /** * Waiting sequence number, if any */ uint32_t waiting_gem_seqno; /** * Last seq seen at irq time */ uint32_t irq_gem_seqno; /** * Flag if the X Server, and thus DRM, is not currently in * control of the device. * * This is set between LeaveVT and EnterVT. It needs to be * replaced with a semaphore. It also needs to be * transitioned away from for kernel modesetting. */ int suspended; /** * Flag if the hardware appears to be wedged. * * This is set when attempts to idle the device timeout. * It prevents command submission from occuring and makes * every pending request fail */ int wedged; /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; /** Bit 6 swizzling required for Y tiling */ uint32_t bit_6_swizzle_y; /* storage for physical objects */ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; /* accounting, useful for userland debugging */ size_t gtt_total; size_t mappable_gtt_total; size_t object_memory; u32 object_count; struct intel_gtt gtt; eventhandler_tag i915_lowmem; } mm; const struct intel_device_info *info; + /* Old dri1 support infrastructure, beware the dragons ya fools entering + * here! */ + struct { + unsigned allow_batchbuffer : 1; + u32 *gfx_hws_cpu_addr; + } dri1; + + /* Kernel Modesetting */ + struct sdvo_device_mapping sdvo_mappings[2]; /* indicate whether the LVDS_BORDER should be enabled or not */ unsigned int lvds_border_bits; /* Panel fitter placement and size for Ironlake+ */ u32 pch_pf_pos, pch_pf_size; struct drm_crtc *plane_to_crtc_mapping[3]; struct drm_crtc *pipe_to_crtc_mapping[3]; /* wait_queue_head_t pending_flip_queue; XXXKIB */ - bool flip_pending_is_done; + struct intel_pch_pll pch_plls[I915_NUM_PLLS]; + /* Reclocking support */ bool render_reclock_avail; bool lvds_downclock_avail; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; struct task idle_task; struct callout idle_callout; bool busy; u16 orig_clock; int child_dev_num; struct child_device_config *child_dev; struct drm_connector *int_lvds_connector; struct drm_connector *int_edp_connector; device_t bridge_dev; bool mchbar_need_disable; int mch_res_rid; struct resource *mch_res; struct mtx rps_lock; u32 pm_iir; struct task rps_task; u8 cur_delay; u8 min_delay; u8 max_delay; u8 fmax; u8 fstart; u64 last_count1; unsigned long last_time1; unsigned long chipset_power; u64 last_count2; struct timespec last_time2; unsigned long gfx_power; int c_m; int r_t; u8 corr; struct mtx *mchdev_lock; enum no_fbc_reason no_fbc_reason; - unsigned int stop_rings; + struct drm_mm_node *compressed_fb; + struct drm_mm_node *compressed_llb; unsigned long cfb_size; unsigned int cfb_fb; int cfb_plane; int cfb_y; struct intel_fbc_work *fbc_work; unsigned int fsb_freq, mem_freq, is_ddr3; struct taskqueue *tq; struct task error_task; struct task hotplug_task; int error_completion; struct mtx error_completion_lock; + /* Protected by dev->error_lock. 
*/ struct drm_i915_error_state *first_error; struct mtx error_lock; struct callout hangcheck_timer; unsigned long last_gpu_reset; struct intel_fbdev *fbdev; struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; bool hw_contexts_disabled; uint32_t hw_context_size; } drm_i915_private_t; /* Iterate over initialised rings */ #define for_each_ring(ring__, dev_priv__, i__) \ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ if (((ring__) = &(dev_priv__)->rings[(i__)]), intel_ring_initialized((ring__))) enum hdmi_force_audio { HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ HDMI_AUDIO_OFF, /* force turn off HDMI audio */ HDMI_AUDIO_AUTO, /* trust EDID */ HDMI_AUDIO_ON, /* force turn on HDMI audio */ }; enum i915_cache_level { I915_CACHE_NONE, I915_CACHE_LLC, I915_CACHE_LLC_MLC, /* gen6+ */ }; enum intel_chip_family { CHIP_I8XX = 0x01, CHIP_I9XX = 0x02, CHIP_I915 = 0x04, CHIP_I965 = 0x08, }; /** driver private structure attached to each drm_gem_object */ struct drm_i915_gem_object { struct drm_gem_object base; /** Current space allocated to this object in the GTT, if any. */ struct drm_mm_node *gtt_space; struct list_head gtt_list; /** This object's place on the active/flushing/inactive lists */ struct list_head ring_list; struct list_head mm_list; /** This object's place on GPU write list */ struct list_head gpu_write_list; /** This object's place in the batchbuffer or on the eviction list */ struct list_head exec_list; /** * This is set if the object is on the active or flushing lists * (has pending rendering), and is not set if it's on inactive (ready * to be unbound). */ unsigned int active:1; /** * This is set if the object has been written to since last bound * to the GTT */ unsigned int dirty:1; /** * This is set if the object has been written to since the last * GPU flush. */ unsigned int pending_gpu_write:1; /** * Fence register bits (if any) for this object. Will be set * as needed when mapped into the GTT. * Protected by dev->struct_mutex. */ signed int fence_reg:I915_MAX_NUM_FENCE_BITS; /** * Advice: are the backing pages purgeable? */ unsigned int madv:2; /** * Current tiling mode for the object. */ unsigned int tiling_mode:2; - unsigned int tiling_changed:1; + /** + * Whether the tiling parameters for the currently associated fence + * register have changed. Note that for the purposes of tracking + * tiling changes we also treat the unfenced register, the register + * slot that the object occupies whilst it executes a fenced + * command (such as BLT on gen2/3), as a "fence". + */ + unsigned int fence_dirty:1; /** How many users have pinned this object in GTT space. The following * users can each hold at most one reference: pwrite/pread, pin_ioctl * (via user_pin_count), execbuffer (objects are not allowed multiple * times for the same batchbuffer), and the framebuffer code. When * switching/pageflipping, the framebuffer code has at most two buffers * pinned per crtc. * * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 * bits with absolutely no headroom. So use 4 bits. */ unsigned int pin_count:4; #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf /** * Is the object at the current location in the gtt mappable and * fenceable? Used to avoid costly recalculations. */ unsigned int map_and_fenceable:1; /** * Whether the current gtt mapping needs to be mappable (and isn't just * mappable by accident). Track pin and fault separate for a more * accurate mappable working set. 
*/ unsigned int fault_mappable:1; unsigned int pin_mappable:1; + unsigned int pin_display:1; /* * Is the GPU currently using a fence to access this buffer, */ unsigned int pending_fenced_gpu_access:1; unsigned int fenced_gpu_access:1; unsigned int cache_level:2; unsigned int has_aliasing_ppgtt_mapping:1; unsigned int has_global_gtt_mapping:1; vm_page_t *pages; + int pages_pin_count; /** * DMAR support */ struct sglist *sg_list; /** * Used for performing relocations during execbuffer insertion. */ LIST_ENTRY(drm_i915_gem_object) exec_node; unsigned long exec_handle; struct drm_i915_gem_exec_object2 *exec_entry; /** * Current offset of the object in GTT space. * * This is the same as gtt_space->start */ uint32_t gtt_offset; - /** Breadcrumb of last rendering to the buffer. */ - uint32_t last_rendering_seqno; struct intel_ring_buffer *ring; + /** Breadcrumb of last rendering to the buffer. */ + uint32_t last_rendering_seqno; /** Breadcrumb of last fenced GPU access to the buffer. */ uint32_t last_fenced_seqno; - struct intel_ring_buffer *last_fenced_ring; /** Current tiling stride for the object, if it's tiled. */ uint32_t stride; /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; - /** - * If present, while GEM_DOMAIN_CPU is in the read domain this array - * flags which individual pages are valid. - */ - uint8_t *page_cpu_valid; - /** User space pin count and filp owning the pin */ uint32_t user_pin_count; struct drm_file *pin_filp; /** for phy allocated objects */ struct drm_i915_gem_phys_object *phys_obj; /** * Number of crtcs where this object is currently the fb, but * will be page flipped away on the next vblank. When it * reaches 0, dev_priv->pending_flip_queue will be woken up. */ int pending_flip; }; #define to_intel_bo(x) __containerof(x, struct drm_i915_gem_object, base) /** * Request queue structure. * * The request queue allows us to note sequence numbers that have been emitted * and may be associated with active buffers to be retired. * * By keeping this list, we can avoid having to do questionable * sequence-number comparisons on buffer last_rendering_seqnos, and associate * an emission time with seqnos for tracking how far ahead of the GPU we are. */ struct drm_i915_gem_request { /** On Which ring this request was generated */ struct intel_ring_buffer *ring; /** GEM sequence number associated with this request. */ uint32_t seqno; /** Postion in the ringbuffer of the end of the request */ u32 tail; /** Time at which this request was emitted, in jiffies. 
*/ unsigned long emitted_jiffies; /** global list entry for this request */ struct list_head list; struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_list; }; struct drm_i915_file_private { struct { struct list_head request_list; struct mtx lck; } mm; struct drm_gem_names context_idr; }; struct drm_i915_error_state { + u_int ref; u32 eir; u32 pgtbl_er; + u32 ier; + bool waiting[I915_NUM_RINGS]; u32 pipestat[I915_MAX_PIPES]; u32 tail[I915_NUM_RINGS]; u32 head[I915_NUM_RINGS]; u32 ipeir[I915_NUM_RINGS]; u32 ipehr[I915_NUM_RINGS]; u32 instdone[I915_NUM_RINGS]; u32 acthd[I915_NUM_RINGS]; u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; /* our own tracking of ring head and tail */ u32 cpu_ring_head[I915_NUM_RINGS]; u32 cpu_ring_tail[I915_NUM_RINGS]; u32 error; /* gen6+ */ u32 instpm[I915_NUM_RINGS]; u32 instps[I915_NUM_RINGS]; u32 instdone1; u32 seqno[I915_NUM_RINGS]; u64 bbaddr; u32 fault_reg[I915_NUM_RINGS]; u32 done_reg; u32 faddr[I915_NUM_RINGS]; u64 fence[I915_MAX_NUM_FENCES]; struct timeval time; struct drm_i915_error_ring { struct drm_i915_error_object { int page_count; u32 gtt_offset; u32 *pages[0]; } *ringbuffer, *batchbuffer; struct drm_i915_error_request { long jiffies; u32 seqno; u32 tail; } *requests; int num_requests; } ring[I915_NUM_RINGS]; struct drm_i915_error_buffer { u32 size; u32 name; u32 seqno; u32 gtt_offset; u32 read_domains; u32 write_domain; s32 fence_reg:I915_MAX_NUM_FENCE_BITS; s32 pinned:2; u32 tiling:2; u32 dirty:1; u32 purgeable:1; s32 ring:4; u32 cache_level:2; } *active_bo, *pinned_bo; u32 active_bo_count, pinned_bo_count; struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; }; /** * RC6 is a special power stage which allows the GPU to enter an very * low-voltage mode when idle, using down to 0V while at this stage. This * stage is entered automatically when the GPU is idle when RC6 support is * enabled, and as soon as new workload arises GPU wakes up automatically as well. * * There are different RC6 modes available in Intel GPU, which differentiate * among each other with the latency required to enter and leave RC6 and * voltage consumed by the GPU in different states. * * The combination of the following flags define which states GPU is allowed * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and * RC6pp is deepest RC6. Their support by hardware varies according to the * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one * which brings the most power savings; deeper states save more power, but * require higher latency to switch to and wake up. 
*/ #define INTEL_RC6_ENABLE (1<<0) #define INTEL_RC6p_ENABLE (1<<1) #define INTEL_RC6pp_ENABLE (1<<2) extern int intel_iommu_enabled; extern struct drm_ioctl_desc i915_ioctls[]; extern struct drm_driver_info i915_driver_info; extern struct cdev_pager_ops i915_gem_pager_ops; extern unsigned int i915_fbpercrtc; extern int i915_panel_ignore_lid; +extern int i915_panel_invert_brightness; extern unsigned int i915_powersave; +extern int i915_prefault_disable; extern int i915_semaphores; extern unsigned int i915_lvds_downclock; +extern int i915_lvds_channel_mode; extern int i915_panel_use_ssc; extern int i915_vbt_sdvo_panel_type; extern int i915_enable_rc6; extern int i915_enable_fbc; extern int i915_enable_ppgtt; extern int i915_enable_hangcheck; const struct intel_device_info *i915_get_device_id(int device); -extern int intel_gpu_reset(struct drm_device *dev); int i915_reset(struct drm_device *dev); +extern int intel_gpu_reset(struct drm_device *dev); /* i915_debug.c */ int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top); void i915_sysctl_cleanup(struct drm_device *dev); /* i915_dma.c */ int i915_batchbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv); +void i915_update_dri1_breadcrumb(struct drm_device *dev); extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_unload(struct drm_device *); extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); extern void i915_driver_lastclose(struct drm_device * dev); extern void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); extern void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv); extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern int i915_emit_box(struct drm_device *dev, struct drm_clip_rect __user *boxes, int i, int DR1, int DR4); int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box, int DR1, int DR4); unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); unsigned long i915_mch_val(struct drm_i915_private *dev_priv); void i915_update_gfx_val(struct drm_i915_private *dev_priv); unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); unsigned long i915_read_mch_val(void); bool i915_gpu_raise(void); bool i915_gpu_lower(void); bool i915_gpu_busy(void); bool i915_gpu_turbo_disable(void); /* i915_irq.c */ extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int i915_irq_wait(struct drm_device *dev, void *data, - struct drm_file *file_priv); - extern void intel_irq_init(struct drm_device *dev); -extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int i915_vblank_swap(struct drm_device *dev, void *data, - struct drm_file *file_priv); void intel_enable_asle(struct drm_device *dev); void i915_hangcheck_elapsed(void *context); void i915_handle_error(struct drm_device *dev, bool wedged); +void i915_error_state_free(struct drm_i915_error_state *error); void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 
mask); void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void i915_destroy_error_state(struct drm_device *dev); /* i915_gem.c */ int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size, uint32_t *handle_p); int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); void i915_gem_unload(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); void i915_gem_free_object(struct drm_gem_object *obj); int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, bool map_and_fenceable); void i915_gem_object_unpin(struct drm_i915_gem_object *obj); int i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); uint32_t i915_get_gem_seqno(struct drm_device *dev); -static inline void +static inline bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; dev_priv->fence_regs[obj->fence_reg].pin_count++; - } + return true; + } else + return false; } static inline void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; dev_priv->fence_regs[obj->fence_reg].pin_count--; } } void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); void i915_gem_clflush_object(struct drm_i915_gem_object *obj); struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); -int i915_gem_do_init(struct drm_device *dev, unsigned long start, - unsigned long mappable_end, unsigned long end); 
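/*
 * [Editor's illustrative sketch -- not part of the diff.]  The hunk above
 * changes i915_gem_object_pin_fence() from void to bool so a caller can
 * learn whether a fence-register pin count was actually taken and balance
 * the unpin accordingly.  The caller below is hypothetical (the name
 * example_use_fenced_access is not a driver symbol) and compiles only in
 * the i915_drv.h context it illustrates.
 */
static void
example_use_fenced_access(struct drm_i915_gem_object *obj)
{
	bool fenced;

	fenced = i915_gem_object_pin_fence(obj); /* true iff pin_count was bumped */

	/* ... perform the fenced (tiled) access here ... */

	if (fenced)
		i915_gem_object_unpin_fence(obj); /* drop only what was taken */
}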
uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, uint32_t size, int tiling_mode); int i915_mutex_lock_interruptible(struct drm_device *dev); int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); +int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, + bool write); int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined); +void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int i915_gem_flush_ring(struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); +int i915_gem_object_sync(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to); int i915_gem_object_put_fence(struct drm_i915_gem_object *obj); int i915_gem_idle(struct drm_device *dev); +int i915_gem_init(struct drm_device *dev); int i915_gem_init_hw(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_init_ppgtt(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); -int i915_gpu_idle(struct drm_device *dev, bool do_retire); +int i915_gpu_idle(struct drm_device *dev); void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, uint32_t seqno); int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, struct drm_i915_gem_request *request); -int i915_gem_object_get_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined); +int i915_gem_object_get_fence(struct drm_i915_gem_object *obj); void i915_gem_reset(struct drm_device *dev); -int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno, - bool do_retire); +int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno); int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot); int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot, uint64_t *phys); void i915_gem_release(struct drm_device *dev, struct drm_file *file); int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); /* i915_gem_context.c */ void i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, int to_id); int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file); void i915_gem_free_all_phys_object(struct drm_device *dev); void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, int id, int align); int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object 
*obj); void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); +void i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj, + struct vm_page *m); /* i915_gem_evict.c */ int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, bool mappable); int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); -int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only); +/* i915_gem_stolen.c */ +int i915_gem_init_stolen(struct drm_device *dev); +void i915_gem_cleanup_stolen(struct drm_device *dev); + /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); /* intel_iic.c */ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); extern void intel_gmbus_set_speed(device_t idev, int speed); extern void intel_gmbus_force_bit(device_t idev, bool force_bit); extern void intel_iic_reset(struct drm_device *dev); +static inline bool intel_gmbus_is_port_valid(unsigned port) +{ + return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); +} +extern device_t intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, + unsigned port); /* intel_opregion.c */ int intel_opregion_setup(struct drm_device *dev); extern void intel_opregion_init(struct drm_device *dev); extern void intel_opregion_fini(struct drm_device *dev); extern void intel_opregion_asle_intr(struct drm_device *dev); extern void intel_opregion_gse_intr(struct drm_device *dev); extern void intel_opregion_enable_asle(struct drm_device *dev); /* i915_gem_gtt.c */ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev); void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj); void i915_gem_restore_gtt_mappings(struct drm_device *dev); -int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); +int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); -void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level); +void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); +int i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start, + unsigned long mappable_end, unsigned long end); /* modesetting */ +extern void intel_modeset_init_hw(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void ironlake_init_pch_refclk(struct drm_device *dev); extern void ironlake_enable_rc6(struct drm_device *dev); extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void intel_detect_pch(struct drm_device *dev); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); +/* IPS */ +extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); +extern void intel_gpu_ips_teardown(void); 
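/*
 * [Editor's illustrative sketch -- not part of the diff.]  The
 * intel_gmbus_is_port_valid()/intel_gmbus_get_adapter() pair added above
 * separates validating a VBT- or user-supplied GMBUS port number from
 * looking up its device_t.  A hypothetical caller (example_gmbus_lookup is
 * not a driver symbol) would validate first, then ask for the adapter:
 */
static device_t
example_gmbus_lookup(struct drm_i915_private *dev_priv, unsigned port)
{

	if (!intel_gmbus_is_port_valid(port))
		return (NULL);	/* outside GMBUS_PORT_SSC..GMBUS_PORT_DPD */
	return (intel_gmbus_get_adapter(dev_priv, port));
}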
+extern bool i915_semaphore_is_enabled(struct drm_device *dev); extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv); +extern void vlv_force_wake_get(struct drm_i915_private *dev_priv); +extern void vlv_force_wake_put(struct drm_i915_private *dev_priv); + extern struct intel_overlay_error_state *intel_overlay_capture_error_state( struct drm_device *dev); extern void intel_overlay_print_error_state(struct sbuf *m, struct intel_overlay_error_state *error); extern struct intel_display_error_state *intel_display_capture_error_state( struct drm_device *dev); extern void intel_display_print_error_state(struct sbuf *m, struct drm_device *dev, struct intel_display_error_state *error); static inline void trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz) { CTR4(KTR_DRM_REG, "[%x/%d] %c %x", reg, sz, rw ? "w" : "r", val); } /* On SNB platform, before reading ring registers forcewake bit * must be set to prevent GT core from power down and stale values being * returned. */ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); -/* We give fast paths for the really cool registers */ -#define NEEDS_FORCE_WAKE(dev_priv, reg) \ - (((dev_priv)->info->gen >= 6) && \ - ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) - #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); __i915_read(8, 8) __i915_read(16, 16) __i915_read(32, 32) __i915_read(64, 64) #undef __i915_read #define __i915_write(x, y) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); __i915_write(8, 8) __i915_write(16, 16) __i915_write(32, 32) __i915_write(64, 64) #undef __i915_write #define I915_READ8(reg) i915_read8(dev_priv, (reg)) #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) #define I915_READ16(reg) i915_read16(dev_priv, (reg)) #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) #define I915_READ16_NOTRACE(reg) DRM_READ16(dev_priv->mmio_map, (reg)) #define I915_WRITE16_NOTRACE(reg, val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) #define I915_READ(reg) i915_read32(dev_priv, (reg)) #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) #define I915_READ_NOTRACE(reg) DRM_READ32(dev_priv->mmio_map, (reg)) #define I915_WRITE_NOTRACE(reg, val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) #define I915_READ64(reg) i915_read64(dev_priv, (reg)) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) #define I915_VERBOSE 0 -#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS]) - -#define BEGIN_LP_RING(n) \ - intel_ring_begin(LP_RING(dev_priv), (n)) - -#define OUT_RING(x) \ - intel_ring_emit(LP_RING(dev_priv), x) - -#define ADVANCE_LP_RING() \ - intel_ring_advance(LP_RING(dev_priv)) - -#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ - if (LP_RING(dev->dev_private)->obj == NULL) \ - LOCK_TEST_WITH_RETURN(dev, file); \ -} while (0) - /** * Reads a dword out of the status page, which is written to from the command * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or * MI_STORE_DATA_IMM. 
* * The following dwords have a reserved meaning: * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. * 0x04: ring 0 head pointer * 0x05: ring 1 head pointer (915-class) * 0x06: ring 2 head pointer (915-class) * 0x10-0x1b: Context status DWords (GM45) * 0x1f: Last written status offset. (GM45) * * The area from dword 0x20 to 0x3ff is available for driver usage. */ -#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) -#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) #define I915_GEM_HWS_INDEX 0x20 -#define I915_BREADCRUMB_INDEX 0x21 #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) #define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_845G(dev) ((dev)->pci_device == 0x2562) #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) #define IS_I865G(dev) ((dev)->pci_device == 0x2572) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) #define IS_I945G(dev) ((dev)->pci_device == 0x2772) #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) +#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) +#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) /* XXXKIB LEGACY */ #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ (dev)->pci_device == 0x2982 || \ (dev)->pci_device == 0x2992 || \ (dev)->pci_device == 0x29A2 || \ (dev)->pci_device == 0x2A02 || \ (dev)->pci_device == 0x2A12 || \ (dev)->pci_device == 0x2A42 || \ (dev)->pci_device == 0x2E02 || \ (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ (dev)->pci_device == 0x2E32) #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) #define IS_IGDGM(dev) ((dev)->pci_device == 0xa011) #define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev)) #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) /* XXXKIB LEGACY END */ #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
*/ #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) /* dsparb controlled by hw only */ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) +#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split) #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) +#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) #define PRIMARY_RINGBUFFER_SIZE (128*1024) static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2) { return ((int32_t)(seq1 - seq2) >= 0); +} + +static inline void i915_gem_chipset_flush(struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen < 6) + intel_gtt_chipset_flush(); +} + +static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +{ + /* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */ + obj->pages_pin_count++; +} +static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) +{ + KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count")); + obj->pages_pin_count--; } u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); #endif Index: head/sys/dev/drm2/i915/i915_gem.c =================================================================== --- head/sys/dev/drm2/i915/i915_gem.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_gem.c (revision 277487) @@ -1,3793 +1,4272 @@ /*- * Copyright © 2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Eric Anholt * * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov under sponsorship from * the FreeBSD Foundation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include +#include + static void i915_gem_object_flush_cpu_write_domain( struct drm_i915_gem_object *obj); static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, int tiling_mode); static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable); static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, int flags); static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj); -static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, - bool write); -static void i915_gem_object_set_to_full_cpu_read_domain( - struct drm_i915_gem_object *obj); -static int i915_gem_object_set_cpu_read_domain_range( - struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size); +static void i915_gem_object_put_pages_range(struct drm_i915_gem_object *obj, + off_t start, off_t end); +static int i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj, + off_t start, off_t end); static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj); static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj); static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj); -static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex); +static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex, + bool *fresh); static void i915_gem_process_flushing_list(struct intel_ring_buffer *ring, uint32_t flush_domains); -static void i915_gem_clear_fence_reg(struct drm_device *dev, - struct drm_i915_fence_reg *reg); static void i915_gem_reset_fences(struct drm_device *dev); static void i915_gem_retire_task_handler(void *arg, int pending); -static int i915_gem_phys_pwrite(struct drm_device *dev, - struct drm_i915_gem_object *obj, uint64_t data_ptr, uint64_t offset, - uint64_t size, struct drm_file *file_priv); static void i915_gem_lowmem(void 
*arg); +static void i915_gem_write_fence(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj); +static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, + bool interruptible); +static int i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno); MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem"); long i915_gem_wired_pages_cnt; +static bool cpu_cache_is_coherent(struct drm_device *dev, + enum i915_cache_level level) +{ + return HAS_LLC(dev) || level != I915_CACHE_NONE; +} + +static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) + return true; + + return obj->pin_display; +} + +static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) +{ + if (obj->tiling_mode) + i915_gem_release_mmap(obj); + + /* As we do not have an associated fence register, we will force + * a tiling change if we ever need to acquire one. + */ + obj->fence_dirty = false; + obj->fence_reg = I915_FENCE_REG_NONE; +} + static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size) { dev_priv->mm.object_count++; dev_priv->mm.object_memory += size; } static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, size_t size) { dev_priv->mm.object_count--; dev_priv->mm.object_memory -= size; } static int i915_gem_wait_for_error(struct drm_device *dev) { struct drm_i915_private *dev_priv; int ret; dev_priv = dev->dev_private; if (!atomic_load_acq_int(&dev_priv->mm.wedged)) return (0); mtx_lock(&dev_priv->error_completion_lock); while (dev_priv->error_completion == 0) { ret = -msleep(&dev_priv->error_completion, &dev_priv->error_completion_lock, PCATCH, "915wco", 0); if (ret != 0) { mtx_unlock(&dev_priv->error_completion_lock); return (ret); } } mtx_unlock(&dev_priv->error_completion_lock); if (atomic_load_acq_int(&dev_priv->mm.wedged)) { mtx_lock(&dev_priv->error_completion_lock); dev_priv->error_completion++; mtx_unlock(&dev_priv->error_completion_lock); } return (0); } int i915_mutex_lock_interruptible(struct drm_device *dev) { struct drm_i915_private *dev_priv; int ret; dev_priv = dev->dev_private; ret = i915_gem_wait_for_error(dev); if (ret != 0) return (ret); /* * interruptible shall it be. 
might indeed be if dev_lock is * changed to sx */ ret = sx_xlock_sig(&dev->dev_struct_lock); if (ret != 0) return (-ret); return (0); } -static void -i915_gem_free_object_tail(struct drm_i915_gem_object *obj) +void +i915_gem_free_object(struct drm_gem_object *gem_obj) { + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_device *dev; drm_i915_private_t *dev_priv; - int ret; dev = obj->base.dev; dev_priv = dev->dev_private; - ret = i915_gem_object_unbind(obj); - if (ret == -ERESTART) { - list_move(&obj->mm_list, &dev_priv->mm.deferred_free_list); - return; + CTR1(KTR_DRM, "object_destroy_tail %p", obj); + + if (obj->phys_obj) + i915_gem_detach_phys_object(dev, obj); + + obj->pin_count = 0; + if (i915_gem_object_unbind(obj) == -ERESTARTSYS) { + bool was_interruptible; + + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; + + if (i915_gem_object_unbind(obj)) + printf("i915_gem_free_object: unbind\n"); + + dev_priv->mm.interruptible = was_interruptible; } - CTR1(KTR_DRM, "object_destroy_tail %p", obj); drm_gem_free_mmap_offset(&obj->base); drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(dev_priv, obj->base.size); - free(obj->page_cpu_valid, DRM_I915_GEM); free(obj->bit_17, DRM_I915_GEM); free(obj, DRM_I915_GEM); } -void -i915_gem_free_object(struct drm_gem_object *gem_obj) -{ - struct drm_i915_gem_object *obj; - struct drm_device *dev; - - obj = to_intel_bo(gem_obj); - dev = obj->base.dev; - - while (obj->pin_count > 0) - i915_gem_object_unpin(obj); - - if (obj->phys_obj != NULL) - i915_gem_detach_phys_object(dev, obj); - - i915_gem_free_object_tail(obj); -} - static void init_ring_lists(struct intel_ring_buffer *ring) { INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); INIT_LIST_HEAD(&ring->gpu_write_list); } void i915_gem_load(struct drm_device *dev) { drm_i915_private_t *dev_priv; int i; dev_priv = dev->dev_private; INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); - INIT_LIST_HEAD(&dev_priv->mm.pinned_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); - INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); INIT_LIST_HEAD(&dev_priv->mm.gtt_list); for (i = 0; i < I915_NUM_RINGS; i++) init_ring_lists(&dev_priv->rings[i]); for (i = 0; i < I915_MAX_NUM_FENCES; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); TIMEOUT_TASK_INIT(dev_priv->tq, &dev_priv->mm.retire_task, 0, i915_gem_retire_task_handler, dev_priv); dev_priv->error_completion = 0; /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ if (IS_GEN3(dev)) { - u32 tmp = I915_READ(MI_ARB_STATE); - if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { - /* - * arb state is a masked write, so set bit + - * bit in mask. 
- */ - tmp = MI_ARB_C3_LP_WRITE_ENABLE | - (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); - I915_WRITE(MI_ARB_STATE, tmp); - } + I915_WRITE(MI_ARB_STATE, + _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); } dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; /* Old X drivers will take 0-2 for front, back, depth buffers */ if (!drm_core_check_feature(dev, DRIVER_MODESET)) dev_priv->fence_reg_start = 3; if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) dev_priv->num_fence_regs = 16; else dev_priv->num_fence_regs = 8; /* Initialize fence registers to zero */ - for (i = 0; i < dev_priv->num_fence_regs; i++) { - i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]); - } + i915_gem_reset_fences(dev); + i915_gem_detect_bit_6_swizzle(dev); dev_priv->mm.interruptible = true; dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY); } int -i915_gem_do_init(struct drm_device *dev, unsigned long start, - unsigned long mappable_end, unsigned long end) -{ - drm_i915_private_t *dev_priv; - unsigned long mappable; - int error; - - dev_priv = dev->dev_private; - mappable = min(end, mappable_end) - start; - - drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); - - dev_priv->mm.gtt_start = start; - dev_priv->mm.gtt_mappable_end = mappable_end; - dev_priv->mm.gtt_end = end; - dev_priv->mm.gtt_total = end - start; - dev_priv->mm.mappable_gtt_total = mappable; - - /* Take over this portion of the GTT */ - intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); - device_printf(dev->device, - "taking over the fictitious range 0x%lx-0x%lx\n", - dev->agp->base + start, dev->agp->base + start + mappable); - error = -vm_phys_fictitious_reg_range(dev->agp->base + start, - dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING); - return (error); -} - -int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_init *args; drm_i915_private_t *dev_priv; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + dev_priv = dev->dev_private; args = data; if (args->gtt_start >= args->gtt_end || (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) return (-EINVAL); if (mtx_initialized(&dev_priv->mm.gtt_space.unused_lock)) return (-EBUSY); + + /* GEM with user mode setting was never supported on ilk and later. */ + if (INTEL_INFO(dev)->gen >= 5) + return -ENODEV; + /* * XXXKIB. The second-time initialization should be guarded * against. */ - return (i915_gem_do_init(dev, args->gtt_start, args->gtt_end, + return (i915_gem_init_global_gtt(dev, args->gtt_start, args->gtt_end, args->gtt_end)); } int i915_gem_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv; int ret; dev_priv = dev->dev_private; if (dev_priv->mm.suspended) return (0); - ret = i915_gpu_idle(dev, true); + ret = i915_gpu_idle(dev); if (ret != 0) return (ret); + i915_gem_retire_requests(dev); /* Under UMS, be paranoid and evict. */ if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_gem_evict_inactive(dev, false); + ret = i915_gem_evict_everything(dev, false); if (ret != 0) return ret; } i915_gem_reset_fences(dev); /* Hack! Don't let anybody do execbuf while we don't control the chip. * We need to replace this with a semaphore, or something. * And not confound mm.suspended! 
*/ dev_priv->mm.suspended = 1; callout_stop(&dev_priv->hangcheck_timer); i915_kernel_lost_context(dev); i915_gem_cleanup_ringbuffer(dev); /* Cancel the retire work handler, which should be idle now. */ taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL); return (ret); } void i915_gem_init_swizzling(struct drm_device *dev) { drm_i915_private_t *dev_priv; dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen < 5 || dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) return; I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_TILE_SURFACE_SWIZZLING); if (IS_GEN5(dev)) return; + I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); if (IS_GEN6(dev)) - I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB)); + I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); else - I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB)); + I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); } -void -i915_gem_init_ppgtt(struct drm_device *dev) +void i915_gem_init_ppgtt(struct drm_device *dev) { drm_i915_private_t *dev_priv; struct i915_hw_ppgtt *ppgtt; uint32_t pd_offset, pd_entry; vm_paddr_t pt_addr; struct intel_ring_buffer *ring; u_int first_pd_entry_in_global_pt, i; dev_priv = dev->dev_private; ppgtt = dev_priv->mm.aliasing_ppgtt; if (ppgtt == NULL) return; first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES; for (i = 0; i < ppgtt->num_pd_entries; i++) { pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]); pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); pd_entry |= GEN6_PDE_VALID; intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry); } intel_gtt_read_pte(first_pd_entry_in_global_pt); pd_offset = ppgtt->pd_offset; pd_offset /= 64; /* in cachelines, */ pd_offset <<= 16; if (INTEL_INFO(dev)->gen == 6) { - uint32_t ecochk = I915_READ(GAM_ECOCHK); + uint32_t ecochk, gab_ctl, ecobits; + + ecobits = I915_READ(GAC_ECO_BITS); + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); + + gab_ctl = I915_READ(GAB_CTL); + I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + + ecochk = I915_READ(GAM_ECOCHK); I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); - I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); + I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); } else if (INTEL_INFO(dev)->gen >= 7) { I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); /* GFX_MODE is per-ring on gen7+ */ } - for (i = 0; i < I915_NUM_RINGS; i++) { - ring = &dev_priv->rings[i]; - + for_each_ring(ring, dev_priv, i) { if (INTEL_INFO(dev)->gen >= 7) I915_WRITE(RING_MODE_GEN7(ring), - GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); } } int i915_gem_init_hw(struct drm_device *dev) { drm_i915_private_t *dev_priv; int ret; dev_priv = dev->dev_private; i915_gem_init_swizzling(dev); ret = intel_init_render_ring_buffer(dev); if (ret != 0) return (ret); if (HAS_BSD(dev)) { ret = intel_init_bsd_ring_buffer(dev); if (ret != 0) goto cleanup_render_ring; } if (HAS_BLT(dev)) { ret = intel_init_blt_ring_buffer(dev); if (ret != 0) goto cleanup_bsd_ring; } dev_priv->next_seqno = 1; i915_gem_context_init(dev); i915_gem_init_ppgtt(dev); return (0); cleanup_bsd_ring: intel_cleanup_ring_buffer(&dev_priv->rings[VCS]); cleanup_render_ring: intel_cleanup_ring_buffer(&dev_priv->rings[RCS]); return (ret); } +static bool +intel_enable_ppgtt(struct drm_device *dev) +{ + if (i915_enable_ppgtt >= 0) + return i915_enable_ppgtt; 
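	/*
	 * Illustrative aside (assumption, not from this patch): the
	 * i915_enable_ppgtt tunable follows the usual i915 convention of
	 * defaulting to -1 for "auto", so a non-negative value is treated as
	 * an explicit user override here, and only the auto case falls
	 * through to the SNB/VT-d heuristic below.
	 */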
+ + /* Disable ppgtt on SNB if VT-d is on. */ + if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled) + return false; + + return true; +} + +int i915_gem_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long gtt_size, mappable_size; + int ret; + + gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT; + mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT; + + DRM_LOCK(dev); + if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { + /* PPGTT pdes are stolen from global gtt ptes, so shrink the + * aperture accordingly when using aliasing ppgtt. */ + gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; + + i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size); + + ret = i915_gem_init_aliasing_ppgtt(dev); + if (ret) { + DRM_UNLOCK(dev); + return ret; + } + } else { + /* Let GEM Manage all of the aperture. + * + * However, leave one page at the end still bound to the scratch + * page. There are a number of places where the hardware + * apparently prefetches past the end of the object, and we've + * seen multiple hangs with the GPU head pointer stuck in a + * batchbuffer bound at the last page of the aperture. One page + * should be enough to keep any prefetching inside of the + * aperture. + */ + i915_gem_init_global_gtt(dev, 0, mappable_size, + gtt_size); + } + + ret = i915_gem_init_hw(dev); + DRM_UNLOCK(dev); + if (ret != 0) { + i915_gem_cleanup_aliasing_ppgtt(dev); + return (ret); + } + + /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + dev_priv->dri1.allow_batchbuffer = 1; + return 0; +} + int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *dev_priv; struct drm_i915_gem_get_aperture *args; struct drm_i915_gem_object *obj; size_t pinned; dev_priv = dev->dev_private; args = data; - if (!(dev->driver->driver_features & DRIVER_GEM)) - return (-ENODEV); - pinned = 0; DRM_LOCK(dev); - list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) - pinned += obj->gtt_space->size; + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) + if (obj->pin_count) + pinned += obj->gtt_space->size; DRM_UNLOCK(dev); args->aper_size = dev_priv->mm.gtt_total; args->aper_available_size = args->aper_size - pinned; return (0); } int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, bool map_and_fenceable) { - struct drm_device *dev; - struct drm_i915_private *dev_priv; int ret; - dev = obj->base.dev; - dev_priv = dev->dev_private; + if (obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT) + return (-EBUSY); - KASSERT(obj->pin_count != DRM_I915_GEM_OBJECT_MAX_PIN_COUNT, - ("Max pin count")); - if (obj->gtt_space != NULL) { if ((alignment && obj->gtt_offset & (alignment - 1)) || (map_and_fenceable && !obj->map_and_fenceable)) { DRM_DEBUG("bo is already pinned with incorrect alignment:" " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", obj->gtt_offset, alignment, map_and_fenceable, obj->map_and_fenceable); ret = i915_gem_object_unbind(obj); if (ret != 0) return (ret); } } if (obj->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment, map_and_fenceable); if (ret) return (ret); } - if (obj->pin_count++ == 0 && !obj->active) - list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); + if (!obj->has_global_gtt_mapping && map_and_fenceable) + i915_gem_gtt_bind_object(obj, obj->cache_level); + + obj->pin_count++; 
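	/*
	 * Worked example for the aperture shrink in i915_gem_init() above,
	 * assuming I915_PPGTT_PD_ENTRIES is 512 and 4KB pages: the 512
	 * page-directory entries live in global GTT PTE slots, so
	 * 512 * PAGE_SIZE = 2MB of mappable GTT space is trimmed from the
	 * top, while the 512 pages of PTEs they point at map
	 * 512 * 1024 * 4KB = 2GB of PPGTT address space, matching the
	 * PP_DIR_DCLV_2G value programmed in i915_gem_init_ppgtt().
	 */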
obj->pin_mappable |= map_and_fenceable; -#if 1 - KIB_NOTYET(); -#else - WARN_ON(i915_verify_lists(dev)); -#endif - return (0); + return 0; } void i915_gem_object_unpin(struct drm_i915_gem_object *obj) { - struct drm_device *dev; - drm_i915_private_t *dev_priv; - - dev = obj->base.dev; - dev_priv = dev->dev_private; - -#if 1 - KIB_NOTYET(); -#else - WARN_ON(i915_verify_lists(dev)); -#endif KASSERT(obj->pin_count != 0, ("zero pin count")); KASSERT(obj->gtt_space != NULL, ("No gtt mapping")); - if (--obj->pin_count == 0) { - if (!obj->active) - list_move_tail(&obj->mm_list, - &dev_priv->mm.inactive_list); + if (--obj->pin_count == 0) obj->pin_mappable = false; - } -#if 1 - KIB_NOTYET(); -#else - WARN_ON(i915_verify_lists(dev)); -#endif } int i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_pin *args; struct drm_i915_gem_object *obj; struct drm_gem_object *gobj; int ret; args = data; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) return ret; gobj = drm_gem_object_lookup(dev, file, args->handle); if (gobj == NULL) { ret = -ENOENT; goto unlock; } obj = to_intel_bo(gobj); if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); ret = -EINVAL; goto out; } if (obj->pin_filp != NULL && obj->pin_filp != file) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; } obj->user_pin_count++; obj->pin_filp = file; if (obj->user_pin_count == 1) { ret = i915_gem_object_pin(obj, args->alignment, true); if (ret != 0) goto out; } /* XXX - flush the CPU caches for pinned objects * as the X server doesn't manage domains yet */ i915_gem_object_flush_cpu_write_domain(obj); args->offset = obj->gtt_offset; out: drm_gem_object_unreference(&obj->base); unlock: DRM_UNLOCK(dev); return (ret); } int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_pin *args; struct drm_i915_gem_object *obj; int ret; args = data; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) return (ret); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } if (obj->pin_filp != file) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; } obj->user_pin_count--; if (obj->user_pin_count == 0) { obj->pin_filp = NULL; i915_gem_object_unpin(obj); } out: drm_gem_object_unreference(&obj->base); unlock: DRM_UNLOCK(dev); return (ret); } int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_busy *args; struct drm_i915_gem_object *obj; - struct drm_i915_gem_request *request; int ret; args = data; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } args->busy = obj->active; if (args->busy) { if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); - } else if (obj->ring->outstanding_lazy_request == - obj->last_rendering_seqno) { - request = malloc(sizeof(*request), DRM_I915_GEM, - M_WAITOK | M_ZERO); - ret = i915_add_request(obj->ring, NULL, request); - if (ret != 0) - free(request, DRM_I915_GEM); + } else { + ret = i915_gem_check_olr(obj->ring, + obj->last_rendering_seqno); } i915_gem_retire_requests_ring(obj->ring); args->busy = obj->active; } drm_gem_object_unreference(&obj->base); unlock: 
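	/*
	 * Illustrative sketch (assumption, not from this patch):
	 * i915_gem_check_olr() above is "check outstanding lazy request",
	 * roughly
	 *	if (seqno == ring->outstanding_lazy_request)
	 *		ret = i915_add_request(ring, NULL, NULL);
	 * i.e. if the object's last rendering is still only a lazy, unemitted
	 * request, emit it now so i915_gem_retire_requests_ring() can make
	 * progress; this replaces the open-coded request allocation that was
	 * removed.
	 */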
DRM_UNLOCK(dev); return (ret); } static int i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) { struct drm_i915_private *dev_priv; struct drm_i915_file_private *file_priv; unsigned long recent_enough; struct drm_i915_gem_request *request; struct intel_ring_buffer *ring; u32 seqno; int ret; dev_priv = dev->dev_private; if (atomic_load_acq_int(&dev_priv->mm.wedged)) return (-EIO); file_priv = file->driver_priv; recent_enough = ticks - (20 * hz / 1000); ring = NULL; seqno = 0; mtx_lock(&file_priv->mm.lck); list_for_each_entry(request, &file_priv->mm.request_list, client_list) { if (time_after_eq(request->emitted_jiffies, recent_enough)) break; ring = request->ring; seqno = request->seqno; } mtx_unlock(&file_priv->mm.lck); if (seqno == 0) return (0); - ret = 0; - mtx_lock(&ring->irq_lock); - if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { - if (ring->irq_get(ring)) { - while (ret == 0 && - !(i915_seqno_passed(ring->get_seqno(ring), seqno) || - atomic_load_acq_int(&dev_priv->mm.wedged))) - ret = -msleep(ring, &ring->irq_lock, PCATCH, - "915thr", 0); - ring->irq_put(ring); - if (ret == 0 && atomic_load_acq_int(&dev_priv->mm.wedged)) - ret = -EIO; - } else if (_intel_wait_for(dev, - i915_seqno_passed(ring->get_seqno(ring), seqno) || - atomic_load_acq_int(&dev_priv->mm.wedged), 3000, 0, "915rtr")) { - ret = -EBUSY; - } - } - mtx_unlock(&ring->irq_lock); - + ret = __wait_seqno(ring, seqno, true); if (ret == 0) taskqueue_enqueue_timeout(dev_priv->tq, &dev_priv->mm.retire_task, 0); return (ret); } int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { return (i915_gem_ring_throttle(dev, file_priv)); } int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_i915_gem_madvise *args; struct drm_i915_gem_object *obj; int ret; args = data; switch (args->madv) { case I915_MADV_DONTNEED: case I915_MADV_WILLNEED: break; default: return (-EINVAL); } ret = i915_mutex_lock_interruptible(dev); if (ret != 0) return (ret); obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } if (obj->pin_count != 0) { ret = -EINVAL; goto out; } if (obj->madv != I915_MADV_PURGED_INTERNAL) obj->madv = args->madv; if (i915_gem_object_is_purgeable(obj) && obj->gtt_space == NULL) i915_gem_object_truncate(obj); args->retained = obj->madv != I915_MADV_PURGED_INTERNAL; out: drm_gem_object_unreference(&obj->base); unlock: DRM_UNLOCK(dev); return (ret); } void i915_gem_cleanup_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv; + struct intel_ring_buffer *ring; int i; dev_priv = dev->dev_private; - for (i = 0; i < I915_NUM_RINGS; i++) - intel_cleanup_ring_buffer(&dev_priv->rings[i]); + for_each_ring(ring, dev_priv, i) + intel_cleanup_ring_buffer(ring); } int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv; - int ret, i; + int ret; if (drm_core_check_feature(dev, DRIVER_MODESET)) return (0); dev_priv = dev->dev_private; if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) { DRM_ERROR("Reenabling wedged hardware, good luck\n"); atomic_store_rel_int(&dev_priv->mm.wedged, 0); } dev_priv->mm.suspended = 0; ret = i915_gem_init_hw(dev); if (ret != 0) { return (ret); } KASSERT(list_empty(&dev_priv->mm.active_list), ("active list")); KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list")); KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list")); - for (i = 
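	/*
	 * Note on the throttle change above (assumption, not from this
	 * patch): __wait_seqno(ring, seqno, true) is expected to sleep
	 * interruptibly until the ring's hardware seqno passes the target or
	 * the GPU is marked wedged, returning 0 or a negative errno, which is
	 * exactly what the removed irq_get()/msleep() loop did by hand.
	 */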
0; i < I915_NUM_RINGS; i++) { - KASSERT(list_empty(&dev_priv->rings[i].active_list), - ("ring %d active list", i)); - KASSERT(list_empty(&dev_priv->rings[i].request_list), - ("ring %d request list", i)); - } - DRM_UNLOCK(dev); ret = drm_irq_install(dev); DRM_LOCK(dev); if (ret) goto cleanup_ringbuffer; return (0); cleanup_ringbuffer: i915_gem_cleanup_ringbuffer(dev); dev_priv->mm.suspended = 1; return (ret); } int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; drm_irq_uninstall(dev); return (i915_gem_idle(dev)); } int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size, uint32_t *handle_p) { struct drm_i915_gem_object *obj; uint32_t handle; int ret; size = roundup(size, PAGE_SIZE); if (size == 0) return (-EINVAL); obj = i915_gem_alloc_object(dev, size); if (obj == NULL) return (-ENOMEM); handle = 0; ret = drm_gem_handle_create(file, &obj->base, &handle); if (ret != 0) { drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(dev->dev_private, obj->base.size); free(obj, DRM_I915_GEM); return (-ret); } /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference(&obj->base); CTR2(KTR_DRM, "object_create %p %x", obj, size); *handle_p = handle; return (0); } int i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { /* have to work out size/pitch and return them */ args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64); args->size = args->pitch * args->height; return (i915_gem_create(file, dev, args->size, &args->handle)); } int i915_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle) { return (drm_gem_handle_delete(file, handle)); } int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_create *args = data; return (i915_gem_create(file, dev, args->size, &args->handle)); } +#define __user +#define __force +#define __iomem +#define to_user_ptr(x) ((void *)(uintptr_t)(x)) +#define offset_in_page(x) ((x) & PAGE_MASK) +#define page_to_phys(x) VM_PAGE_TO_PHYS(x) +static inline long +__copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return (copyout(from, to, n) != 0 ? n : 0); +} +static inline int +__copy_to_user_inatomic(void __user *to, const void *from, unsigned n) +{ + return (copyout_nofault(from, to, n) != 0 ? n : 0); +} +static inline unsigned long +__copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return ((copyin(__DECONST(void *, from), to, n) != 0 ? n : 0)); +} +#define copy_from_user(to, from, n) __copy_from_user((to), (from), (n)) +static inline unsigned long +__copy_from_user_inatomic_nocache(void *to, const void __user *from, + unsigned long n) +{ + + /* + * XXXKIB. Equivalent Linux function is implemented using + * MOVNTI for aligned moves. For unaligned head and tail, + * normal move is performed. As such, it is not incorrect, if + * only somewhat slower, to use normal copyin. All uses + * except shmem_pwrite_fast() have the destination mapped WC. + */ + return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? 
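	/*
	 * Clarifying note (not part of this change): copyin()/copyout()
	 * return 0 or an errno, while the Linux-style __copy_* helpers are
	 * defined to return the number of bytes left uncopied.  These
	 * wrappers approximate that contract as all-or-nothing: 0 on
	 * success, the full length n on any fault.  All callers below only
	 * test the result for non-zero, so the approximation suffices.
	 */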
n : 0)); +} +static inline int +fault_in_multipages_readable(const char __user *uaddr, int size) +{ + char c; + int ret = 0; + const char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) + return ret; + + while (uaddr <= end) { + ret = copyin(uaddr, &c, 1); + if (ret != 0) + return -EFAULT; + uaddr += PAGE_SIZE; + } + + /* Check whether the range spilled into the next page. */ + if (((unsigned long)uaddr & ~PAGE_MASK) == + ((unsigned long)end & ~PAGE_MASK)) { + ret = copyin(end, &c, 1); + } + + return -ret; +} + +static inline int +fault_in_multipages_writeable(char __user *uaddr, int size) +{ + int ret = 0; + char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) + return ret; + + /* + * Writing zeroes into userspace here is OK, because we know that if + * the zero gets there, we'll be overwriting it. + */ + while (uaddr <= end) { + ret = subyte(uaddr, 0); + if (ret != 0) + return -EFAULT; + uaddr += PAGE_SIZE; + } + + /* Check whether the range spilled into the next page. */ + if (((unsigned long)uaddr & ~PAGE_MASK) == + ((unsigned long)end & ~PAGE_MASK)) + ret = subyte(end, 0); + + return ret; +} + +static inline int +__copy_to_user_swizzled(char __user *cpu_vaddr, + const char *gpu_vaddr, int gpu_offset, + int length) +{ + int ret, cpu_offset = 0; + + while (length > 0) { + int cacheline_end = roundup2(gpu_offset + 1, 64); + int this_length = min(cacheline_end - gpu_offset, length); + int swizzled_gpu_offset = gpu_offset ^ 64; + + ret = __copy_to_user(cpu_vaddr + cpu_offset, + gpu_vaddr + swizzled_gpu_offset, + this_length); + if (ret) + return ret + length; + + cpu_offset += this_length; + gpu_offset += this_length; + length -= this_length; + } + + return 0; +} + +static inline int +__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, + const char __user *cpu_vaddr, + int length) +{ + int ret, cpu_offset = 0; + + while (length > 0) { + int cacheline_end = roundup2(gpu_offset + 1, 64); + int this_length = min(cacheline_end - gpu_offset, length); + int swizzled_gpu_offset = gpu_offset ^ 64; + + ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, + cpu_vaddr + cpu_offset, + this_length); + if (ret) + return ret + length; + + cpu_offset += this_length; + gpu_offset += this_length; + length -= this_length; + } + + return 0; +} + static int -i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj, - uint64_t data_ptr, uint64_t size, uint64_t offset, enum uio_rw rw, - struct drm_file *file) +i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) { - vm_object_t vm_obj; - vm_page_t m; - struct sf_buf *sf; - vm_offset_t mkva; - vm_pindex_t obj_pi; - int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po; + void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset; + char __user *user_data = to_user_ptr(args->data_ptr); - if (obj->gtt_offset != 0 && rw == UIO_READ) - do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - else - do_bit17_swizzling = 0; + if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { + unsigned long unwritten; - obj->dirty = 1; - vm_obj = obj->base.vm_obj; - ret = 0; + /* The physical object once assigned is fixed for the lifetime + * of the obj, so we can safely drop the lock and continue + * to access vaddr. 
+ */ + DRM_UNLOCK(dev); + unwritten = copy_from_user(vaddr, user_data, args->size); + DRM_LOCK(dev); + if (unwritten) + return -EFAULT; + } - VM_OBJECT_WLOCK(vm_obj); - vm_object_pip_add(vm_obj, 1); - while (size > 0) { - obj_pi = OFF_TO_IDX(offset); - obj_po = offset & PAGE_MASK; + i915_gem_chipset_flush(dev); + return 0; +} - m = i915_gem_wire_page(vm_obj, obj_pi); - VM_OBJECT_WUNLOCK(vm_obj); +/* Per-page copy function for the shmem pread fastpath. + * Flushes invalid cachelines before reading the target if + * needs_clflush is set. */ +static int +shmem_pread_fast(vm_page_t page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, bool needs_clflush) +{ + char *vaddr; + struct sf_buf *sf; + int ret; - sched_pin(); - sf = sf_buf_alloc(m, SFB_CPUPRIVATE); - mkva = sf_buf_kva(sf); - length = min(size, PAGE_SIZE - obj_po); - while (length > 0) { - if (do_bit17_swizzling && - (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) { - cnt = roundup2(obj_po + 1, 64); - cnt = min(cnt - obj_po, length); - swizzled_po = obj_po ^ 64; - } else { - cnt = length; - swizzled_po = obj_po; - } - if (rw == UIO_READ) - ret = -copyout_nofault( - (char *)mkva + swizzled_po, - (void *)(uintptr_t)data_ptr, cnt); - else - ret = -copyin_nofault( - (void *)(uintptr_t)data_ptr, - (char *)mkva + swizzled_po, cnt); - if (ret != 0) - break; - data_ptr += cnt; - size -= cnt; - length -= cnt; - offset += cnt; - obj_po += cnt; - } - sf_buf_free(sf); + if (unlikely(page_do_bit17_swizzling)) + return -EINVAL; + + sched_pin(); + sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE); + if (sf == NULL) { sched_unpin(); - VM_OBJECT_WLOCK(vm_obj); - if (rw == UIO_WRITE) - vm_page_dirty(m); - vm_page_reference(m); - vm_page_lock(m); - vm_page_unwire(m, PQ_ACTIVE); - vm_page_unlock(m); - atomic_add_long(&i915_gem_wired_pages_cnt, -1); + return (-EFAULT); + } + vaddr = (char *)sf_buf_kva(sf); + if (needs_clflush) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + ret = __copy_to_user_inatomic(user_data, + vaddr + shmem_page_offset, + page_length); + sf_buf_free(sf); + sched_unpin(); - if (ret != 0) - break; + return ret ? -EFAULT : 0; +} + +static void +shmem_clflush_swizzled_range(char *addr, unsigned long length, + bool swizzled) +{ + if (unlikely(swizzled)) { + unsigned long start = (unsigned long) addr; + unsigned long end = (unsigned long) addr + length; + + /* For swizzling simply ensure that we always flush both + * channels. Lame, but simple and it works. Swizzled + * pwrite/pread is far from a hotpath - current userspace + * doesn't use it at all. */ + start = rounddown2(start, 128); + end = roundup2(end, 128); + + drm_clflush_virt_range((void *)start, end - start); + } else { + drm_clflush_virt_range(addr, length); } - vm_object_pip_wakeup(vm_obj); - VM_OBJECT_WUNLOCK(vm_obj); - return (ret); } +/* Only difference to the fast-path function is that this can handle bit17 + * and uses non-atomic copy and kmap functions. 
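 * (Illustrative aside, not part of this change: on affected chipsets bit 17
 * of a page's physical address is folded into the bit-6 swizzle, so for
 * pages with bit 17 set each 64-byte half of a 128-byte span is swapped
 * with the other half.  The __copy_*_user_swizzled() helpers above
 * compensate by copying from gpu_offset ^ 64, e.g. bytes 0-63 are taken
 * from offsets 64-127 and vice versa, one cacheline at a time.)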
*/ static int -i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj, - uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file) +shmem_pread_slow(vm_page_t page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, bool needs_clflush) { - vm_offset_t mkva; - vm_pindex_t obj_pi; - int obj_po, ret; + char *vaddr; + struct sf_buf *sf; + int ret; - obj_pi = OFF_TO_IDX(offset); - obj_po = offset & PAGE_MASK; + sf = sf_buf_alloc(page, 0); + vaddr = (char *)sf_buf_kva(sf); + if (needs_clflush) + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); - mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset + - IDX_TO_OFF(obj_pi), size, PAT_WRITE_COMBINING); - ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva + - obj_po, size); - pmap_unmapdev(mkva, size); - return (ret); + if (page_do_bit17_swizzling) + ret = __copy_to_user_swizzled(user_data, + vaddr, shmem_page_offset, + page_length); + else + ret = __copy_to_user(user_data, + vaddr + shmem_page_offset, + page_length); + sf_buf_free(sf); + + return ret ? - EFAULT : 0; } static int -i915_gem_obj_io(struct drm_device *dev, uint32_t handle, uint64_t data_ptr, - uint64_t size, uint64_t offset, enum uio_rw rw, struct drm_file *file) +i915_gem_shmem_pread(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file) { - struct drm_i915_gem_object *obj; - vm_page_t *ma; - vm_offset_t start, end; - int npages, ret; + char __user *user_data; + ssize_t remain, sremain; + off_t offset, soffset; + int shmem_page_offset, page_length, ret = 0; + int obj_do_bit17_swizzling, page_do_bit17_swizzling; + int prefaulted = 0; + int needs_clflush = 0; - if (size == 0) - return (0); - start = trunc_page(data_ptr); - end = round_page(data_ptr + size); - npages = howmany(end - start, PAGE_SIZE); - ma = malloc(npages * sizeof(vm_page_t), DRM_I915_GEM, M_WAITOK | - M_ZERO); - npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, - (vm_offset_t)data_ptr, size, - (rw == UIO_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, ma, npages); - if (npages == -1) { - ret = -EFAULT; - goto free_ma; + user_data = to_user_ptr(args->data_ptr); + sremain = remain = args->size; + + obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { + /* If we're not in the cpu read domain, set ourself into the gtt + * read domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will dirty the data + * anyway again before the next pread happens. 
*/ + needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level); + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + return ret; } + soffset = offset = args->offset; + ret = i915_gem_object_get_pages_range(obj, soffset, soffset + sremain); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + VM_OBJECT_WLOCK(obj->base.vm_obj); + for (vm_page_t page = vm_page_find_least(obj->base.vm_obj, + OFF_TO_IDX(offset));; page = vm_page_next(page)) { + VM_OBJECT_WUNLOCK(obj->base.vm_obj); + + if (remain <= 0) + break; + + /* Operation in this page + * + * shmem_page_offset = offset within page in shmem file + * page_length = bytes to copy for this page + */ + shmem_page_offset = offset_in_page(offset); + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + + page_do_bit17_swizzling = obj_do_bit17_swizzling && + (page_to_phys(page) & (1 << 17)) != 0; + + ret = shmem_pread_fast(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + needs_clflush); + if (ret == 0) + goto next_page; + + DRM_UNLOCK(dev); + + if (likely(!i915_prefault_disable) && !prefaulted) { + ret = fault_in_multipages_writeable(user_data, remain); + /* Userspace is tricking us, but we've already clobbered + * its pages with the prefault and promised to write the + * data up to the first fault. Hence ignore any errors + * and just continue. */ + (void)ret; + prefaulted = 1; + } + + ret = shmem_pread_slow(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + needs_clflush); + + DRM_LOCK(dev); + +next_page: + vm_page_reference(page); + + if (ret) + goto out; + + remain -= page_length; + user_data += page_length; + offset += page_length; + VM_OBJECT_WLOCK(obj->base.vm_obj); + } + +out: + i915_gem_object_unpin_pages(obj); + i915_gem_object_put_pages_range(obj, soffset, soffset + sremain); + + return ret; +} + +/** + * Reads data from the object referenced by handle. + * + * On error, the contents of *data are undefined. + */ +int +i915_gem_pread_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_pread *args = data; + struct drm_i915_gem_object *obj; + int ret = 0; + + if (args->size == 0) + return 0; + + if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_WRITE)) + return -EFAULT; + ret = i915_mutex_lock_interruptible(dev); - if (ret != 0) - goto unlocked; + if (ret) + return ret; - obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } - if (offset > obj->base.size || size > obj->base.size - offset) { + + /* Bounds check source. 
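 * (Clarifying note, not part of this change: the check below is written as
 * args->size > obj->base.size - args->offset; the preceding clause has
 * already guaranteed args->offset <= obj->base.size, so the unsigned
 * subtraction cannot wrap and an offset+size overflow cannot slip past the
 * bounds check.)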
*/ + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { ret = -EINVAL; goto out; } - if (rw == UIO_READ) { - CTR3(KTR_DRM, "object_pread %p %jx %jx", obj, offset, size); - ret = i915_gem_object_set_cpu_read_domain_range(obj, - offset, size); - if (ret != 0) - goto out; - ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset, - UIO_READ, file); - } else { - if (obj->phys_obj) { - CTR3(KTR_DRM, "object_phys_write %p %jx %jx", obj, - offset, size); - ret = i915_gem_phys_pwrite(dev, obj, data_ptr, offset, - size, file); - } else if (obj->gtt_space && - obj->base.write_domain != I915_GEM_DOMAIN_CPU) { - CTR3(KTR_DRM, "object_gtt_write %p %jx %jx", obj, - offset, size); - ret = i915_gem_object_pin(obj, 0, true); - if (ret != 0) - goto out; - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret != 0) - goto out_unpin; - ret = i915_gem_object_put_fence(obj); - if (ret != 0) - goto out_unpin; - ret = i915_gem_gtt_write(dev, obj, data_ptr, size, - offset, file); -out_unpin: - i915_gem_object_unpin(obj); - } else { - CTR3(KTR_DRM, "object_pwrite %p %jx %jx", obj, - offset, size); - ret = i915_gem_object_set_to_cpu_domain(obj, true); - if (ret != 0) - goto out; - ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset, - UIO_WRITE, file); - } +#if 1 + KIB_NOTYET(); +#else + /* prime objects have no backing filp to GEM pread/pwrite + * pages from. + */ + if (!obj->base.filp) { + ret = -EINVAL; + goto out; } +#endif + + CTR3(KTR_DRM, "pread %p %jx %jx", obj, args->offset, args->size); + + ret = i915_gem_shmem_pread(dev, obj, args, file); + out: drm_gem_object_unreference(&obj->base); unlock: DRM_UNLOCK(dev); -unlocked: - vm_page_unhold_pages(ma, npages); -free_ma: - free(ma, DRM_I915_GEM); - return (ret); + return ret; } -int -i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +/* This is the fast write path which cannot handle + * page faults in the source data + */ + +static inline int +fast_user_write(struct drm_device *dev, + off_t page_base, int page_offset, + char __user *user_data, + int length) { - struct drm_i915_gem_pread *args; + void __iomem *vaddr_atomic; + void *vaddr; + unsigned long unwritten; - args = data; - return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size, - args->offset, UIO_READ, file)); + vaddr_atomic = pmap_mapdev_attr(dev->agp->base + page_base, + length, PAT_WRITE_COMBINING); + /* We can use the cpu mem copy function because this is X86. */ + vaddr = (char *)vaddr_atomic + page_offset; + unwritten = __copy_from_user_inatomic_nocache(vaddr, + user_data, length); + pmap_unmapdev((vm_offset_t)vaddr_atomic, length); + return unwritten; } +/** + * This is the fast pwrite path, where we copy the data directly from the + * user into the GTT, uncached. 
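 * (Illustrative aside, not part of this change: fast_user_write() above
 * maps the destination aperture page write-combining with
 * pmap_mapdev_attr() and copies with the non-faulting nocache helper,
 * playing roughly the role io_mapping_map_atomic_wc() plays in the Linux
 * code; any fault on the user source makes it return non-zero so the
 * caller can fall back to the shmem slow path.)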
+ */ +static int +i915_gem_gtt_pwrite_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file) +{ + ssize_t remain; + off_t offset, page_base; + char __user *user_data; + int page_offset, page_length, ret; + + ret = i915_gem_object_pin(obj, 0, true); + /* XXXKIB ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); */ + if (ret != 0) + goto out; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto out_unpin; + + ret = i915_gem_object_put_fence(obj); + if (ret) + goto out_unpin; + + user_data = to_user_ptr(args->data_ptr); + remain = args->size; + + offset = obj->gtt_offset + args->offset; + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + page_base = offset & ~PAGE_MASK; + page_offset = offset_in_page(offset); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + /* If we get a fault while copying data, then (presumably) our + * source page isn't available. Return the error and we'll + * retry in the slow path. + */ + if (fast_user_write(dev, page_base, + page_offset, user_data, page_length)) { + ret = -EFAULT; + goto out_unpin; + } + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + +out_unpin: + i915_gem_object_unpin(obj); +out: + return ret; +} + +/* Per-page copy function for the shmem pwrite fastpath. + * Flushes invalid cachelines before writing to the target if + * needs_clflush_before is set and flushes out any written cachelines after + * writing if needs_clflush is set. */ +static int +shmem_pwrite_fast(vm_page_t page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, + bool needs_clflush_before, + bool needs_clflush_after) +{ + char *vaddr; + struct sf_buf *sf; + int ret; + + if (unlikely(page_do_bit17_swizzling)) + return -EINVAL; + + sched_pin(); + sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE); + if (sf == NULL) { + sched_unpin(); + return (-EFAULT); + } + vaddr = (char *)sf_buf_kva(sf); + if (needs_clflush_before) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, + user_data, + page_length); + if (needs_clflush_after) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + sf_buf_free(sf); + sched_unpin(); + + return ret ? -EFAULT : 0; +} + +/* Only difference to the fast-path function is that this can handle bit17 + * and uses non-atomic copy and kmap functions. */ +static int +shmem_pwrite_slow(vm_page_t page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, + bool needs_clflush_before, + bool needs_clflush_after) +{ + char *vaddr; + struct sf_buf *sf; + int ret; + + sf = sf_buf_alloc(page, 0); + vaddr = (char *)sf_buf_kva(sf); + if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); + if (page_do_bit17_swizzling) + ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, + user_data, + page_length); + else + ret = __copy_from_user(vaddr + shmem_page_offset, + user_data, + page_length); + if (needs_clflush_after) + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); + sf_buf_free(sf); + + return ret ? 
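	/*
	 * Clarifying note (not part of this change): the *_fast helpers map
	 * the page with sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE)
	 * under sched_pin(), a per-CPU, non-sleeping ephemeral mapping in
	 * the spirit of kmap_atomic(); the *_slow helpers, like this one,
	 * use a plain, possibly sleeping sf_buf_alloc(page, 0) and are only
	 * called after the caller has dropped the DRM lock.
	 */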
-EFAULT : 0; +} + +static int +i915_gem_shmem_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file) +{ + ssize_t remain, sremain; + off_t offset, soffset; + char __user *user_data; + int shmem_page_offset, page_length, ret = 0; + int obj_do_bit17_swizzling, page_do_bit17_swizzling; + int hit_slowpath = 0; + int needs_clflush_after = 0; + int needs_clflush_before = 0; + + user_data = to_user_ptr(args->data_ptr); + sremain = remain = args->size; + + obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + /* If we're not in the cpu write domain, set ourself into the gtt + * write domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will use the data + * right away and we therefore have to clflush anyway. */ + needs_clflush_after = cpu_write_needs_clflush(obj); + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + } + /* Same trick applies to invalidate partially written cachelines read + * before writing. */ + if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) + needs_clflush_before = + !cpu_cache_is_coherent(dev, obj->cache_level); + + soffset = offset = args->offset; + ret = i915_gem_object_get_pages_range(obj, soffset, soffset + sremain); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + obj->dirty = 1; + + VM_OBJECT_WLOCK(obj->base.vm_obj); + for (vm_page_t page = vm_page_find_least(obj->base.vm_obj, + OFF_TO_IDX(offset));; page = vm_page_next(page)) { + VM_OBJECT_WUNLOCK(obj->base.vm_obj); + int partial_cacheline_write; + + if (remain <= 0) + break; + + /* Operation in this page + * + * shmem_page_offset = offset within page in shmem file + * page_length = bytes to copy for this page + */ + shmem_page_offset = offset_in_page(offset); + + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + + /* If we don't overwrite a cacheline completely we need to be + * careful to have up-to-date data by first clflushing. Don't + * overcomplicate things and flush the entire patch. */ + partial_cacheline_write = needs_clflush_before && + ((shmem_page_offset | page_length) + & (cpu_clflush_line_size - 1)); + + page_do_bit17_swizzling = obj_do_bit17_swizzling && + (page_to_phys(page) & (1 << 17)) != 0; + + ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + partial_cacheline_write, + needs_clflush_after); + if (ret == 0) + goto next_page; + + hit_slowpath = 1; + DRM_UNLOCK(dev); + ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + partial_cacheline_write, + needs_clflush_after); + + DRM_LOCK(dev); + +next_page: + vm_page_dirty(page); + vm_page_reference(page); + + if (ret) + goto out; + + remain -= page_length; + user_data += page_length; + offset += page_length; + VM_OBJECT_WLOCK(obj->base.vm_obj); + } + +out: + i915_gem_object_unpin_pages(obj); + i915_gem_object_put_pages_range(obj, soffset, soffset + sremain); + + if (hit_slowpath) { + /* + * Fixup: Flush cpu caches in case we didn't flush the dirty + * cachelines in-line while writing and the object moved + * out of the cpu write domain while we've dropped the lock. 
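 * (Clarifying note, not part of this change: partial_cacheline_write above
 * evaluates (shmem_page_offset | page_length) & (cpu_clflush_line_size - 1),
 * which is non-zero exactly when the start offset or the length is not a
 * multiple of the cacheline size, i.e. when the write cannot cover whole
 * cachelines and a pre-flush is required.)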
+ */ + if (!needs_clflush_after && + obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + i915_gem_clflush_object(obj); + i915_gem_chipset_flush(dev); + } + } + + if (needs_clflush_after) + i915_gem_chipset_flush(dev); + + return ret; +} + +/** + * Writes data to the object referenced by handle. + * + * On error, the contents of the buffer that were to be modified are undefined. + */ int -i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) { - struct drm_i915_gem_pwrite *args; + struct drm_i915_gem_pwrite *args = data; + struct drm_i915_gem_object *obj; + int ret; - args = data; - return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size, - args->offset, UIO_WRITE, file)); + if (args->size == 0) + return 0; + + if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_READ)) + return -EFAULT; + + if (likely(!i915_prefault_disable)) { + ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), + args->size); + if (ret) + return -EFAULT; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + /* Bounds check destination. */ + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { + ret = -EINVAL; + goto out; + } + +#if 1 + KIB_NOTYET(); +#else + /* prime objects have no backing filp to GEM pread/pwrite + * pages from. + */ + if (!obj->base.filp) { + ret = -EINVAL; + goto out; + } +#endif + + CTR3(KTR_DRM, "pwrite %p %jx %jx", obj, args->offset, args->size); + + ret = -EFAULT; + /* We can only do the GTT pwrite on untiled buffers, as otherwise + * it would end up going through the fenced access, and we'll get + * different detiling behavior between reading and writing. + * pread/pwrite currently are reading and writing from the CPU + * perspective, requiring manual detiling by the client. + */ + if (obj->phys_obj) { + ret = i915_gem_phys_pwrite(dev, obj, args, file); + goto out; + } + + if (obj->tiling_mode == I915_TILING_NONE && + obj->base.write_domain != I915_GEM_DOMAIN_CPU && + cpu_write_needs_clflush(obj)) { + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); + /* Note that the gtt paths might fail with non-page-backed user + * pointers (e.g. gtt mappings when moving data between + * textures). Fallback to the shmem path in that case. 
*/ + } + + if (ret == -EFAULT || ret == -ENOSPC) + ret = i915_gem_shmem_pwrite(dev, obj, args, file); + +out: + drm_gem_object_unreference(&obj->base); +unlock: + DRM_UNLOCK(dev); + return ret; } +#undef __user +#undef __force +#undef __iomem +#undef to_user_ptr +#undef offset_in_page +#undef page_to_phys int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_set_domain *args; struct drm_i915_gem_object *obj; uint32_t read_domains; uint32_t write_domain; int ret; - if ((dev->driver->driver_features & DRIVER_GEM) == 0) - return (-ENODEV); - args = data; read_domains = args->read_domains; write_domain = args->write_domain; if ((write_domain & I915_GEM_GPU_DOMAINS) != 0 || (read_domains & I915_GEM_GPU_DOMAINS) != 0 || (write_domain != 0 && read_domains != write_domain)) return (-EINVAL); ret = i915_mutex_lock_interruptible(dev); if (ret != 0) return (ret); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } if ((read_domains & I915_GEM_DOMAIN_GTT) != 0) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); if (ret == -EINVAL) ret = 0; } else ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); drm_gem_object_unreference(&obj->base); unlock: DRM_UNLOCK(dev); return (ret); } int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_sw_finish *args; struct drm_i915_gem_object *obj; int ret; args = data; - ret = 0; - if ((dev->driver->driver_features & DRIVER_GEM) == 0) - return (ENODEV); + ret = i915_mutex_lock_interruptible(dev); if (ret != 0) return (ret); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } if (obj->pin_count != 0) i915_gem_object_flush_cpu_write_domain(obj); drm_gem_object_unreference(&obj->base); unlock: DRM_UNLOCK(dev); return (ret); } int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_mmap *args; struct drm_gem_object *obj; struct proc *p; vm_map_t map; vm_offset_t addr; vm_size_t size; int error, rv; args = data; - if ((dev->driver->driver_features & DRIVER_GEM) == 0) - return (-ENODEV); - obj = drm_gem_object_lookup(dev, file, args->handle); if (obj == NULL) return (-ENOENT); error = 0; if (args->size == 0) goto out; p = curproc; map = &p->p_vmspace->vm_map; size = round_page(args->size); PROC_LOCK(p); if (map->size + size > lim_cur(p, RLIMIT_VMEM)) { PROC_UNLOCK(p); error = ENOMEM; goto out; } PROC_UNLOCK(p); addr = 0; vm_object_reference(obj->vm_obj); DRM_UNLOCK(dev); rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0, VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE); if (rv != KERN_SUCCESS) { vm_object_deallocate(obj->vm_obj); error = -vm_mmap_to_errno(rv); } else { args->addr_ptr = (uint64_t)addr; } DRM_LOCK(dev); out: drm_gem_object_unreference(obj); return (error); } static int i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { *color = 0; /* XXXKIB */ return (0); } int i915_intr_pf; static int i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct drm_gem_object *gem_obj; struct drm_i915_gem_object *obj; struct drm_device *dev; drm_i915_private_t *dev_priv; vm_page_t m, oldm; int cause, ret; bool write; gem_obj = vm_obj->handle; obj = to_intel_bo(gem_obj); 
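	/*
	 * Illustrative aside (assumption, not from this patch): this handler
	 * resolves GTT mmap faults to fictitious pages covering the aperture,
	 * PHYS_TO_VM_PAGE(dev->agp->base + obj->gtt_offset + offset), relying
	 * on the fictitious range registered when the global GTT is set up,
	 * so userspace accesses go through the GTT and honor the object's
	 * fence/tiling setup instead of touching the backing pages directly.
	 */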
dev = obj->base.dev; dev_priv = dev->dev_private; #if 0 write = (prot & VM_PROT_WRITE) != 0; #else write = true; #endif vm_object_pip_add(vm_obj, 1); /* * Remove the placeholder page inserted by vm_fault() from the * object before dropping the object lock. If * i915_gem_release_mmap() is active in parallel on this gem * object, then it owns the drm device sx and might find the * placeholder already. Then, since the page is busy, * i915_gem_release_mmap() sleeps waiting for the busy state * of the page cleared. We will be not able to acquire drm * device lock until i915_gem_release_mmap() is able to make a * progress. */ if (*mres != NULL) { oldm = *mres; vm_page_lock(oldm); vm_page_remove(oldm); vm_page_unlock(oldm); *mres = NULL; } else oldm = NULL; VM_OBJECT_WUNLOCK(vm_obj); retry: cause = ret = 0; m = NULL; if (i915_intr_pf) { ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { cause = 10; goto out; } } else DRM_LOCK(dev); /* * Since the object lock was dropped, other thread might have * faulted on the same GTT address and instantiated the * mapping for the page. Recheck. */ VM_OBJECT_WLOCK(vm_obj); m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset)); if (m != NULL) { if (vm_page_busied(m)) { DRM_UNLOCK(dev); vm_page_lock(m); VM_OBJECT_WUNLOCK(vm_obj); vm_page_busy_sleep(m, "915pee"); goto retry; } goto have_page; } else VM_OBJECT_WUNLOCK(vm_obj); /* Now bind it into the GTT if needed */ if (!obj->map_and_fenceable) { ret = i915_gem_object_unbind(obj); if (ret != 0) { cause = 20; goto unlock; } } if (!obj->gtt_space) { ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret != 0) { cause = 30; goto unlock; } ret = i915_gem_object_set_to_gtt_domain(obj, write); if (ret != 0) { cause = 40; goto unlock; } } - if (obj->tiling_mode == I915_TILING_NONE) - ret = i915_gem_object_put_fence(obj); - else - ret = i915_gem_object_get_fence(obj, NULL); + if (!obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(obj, obj->cache_level); + + ret = i915_gem_object_get_fence(obj); if (ret != 0) { cause = 50; goto unlock; } if (i915_gem_object_is_inactive(obj)) list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); obj->fault_mappable = true; VM_OBJECT_WLOCK(vm_obj); m = PHYS_TO_VM_PAGE(dev->agp->base + obj->gtt_offset + offset); KASSERT((m->flags & PG_FICTITIOUS) != 0, ("physical address %#jx not fictitious", (uintmax_t)(dev->agp->base + obj->gtt_offset + offset))); if (m == NULL) { VM_OBJECT_WUNLOCK(vm_obj); cause = 60; ret = -EFAULT; goto unlock; } KASSERT((m->flags & PG_FICTITIOUS) != 0, ("not fictitious %p", m)); KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m)); if (vm_page_busied(m)) { DRM_UNLOCK(dev); vm_page_lock(m); VM_OBJECT_WUNLOCK(vm_obj); vm_page_busy_sleep(m, "915pbs"); goto retry; } if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) { DRM_UNLOCK(dev); VM_OBJECT_WUNLOCK(vm_obj); VM_WAIT; goto retry; } m->valid = VM_PAGE_BITS_ALL; have_page: *mres = m; vm_page_xbusy(m); CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot, m->phys_addr); DRM_UNLOCK(dev); if (oldm != NULL) { vm_page_lock(oldm); vm_page_free(oldm); vm_page_unlock(oldm); } vm_object_pip_wakeup(vm_obj); return (VM_PAGER_OK); unlock: DRM_UNLOCK(dev); out: KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return")); CTR5(KTR_DRM, "fault_fail %p %jx %x err %d %d", gem_obj, offset, prot, -ret, cause); if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) { kern_yield(PRI_USER); goto retry; } VM_OBJECT_WLOCK(vm_obj); vm_object_pip_wakeup(vm_obj); return (VM_PAGER_ERROR); } static void i915_gem_pager_dtor(void 
*handle) { struct drm_gem_object *obj; struct drm_device *dev; obj = handle; dev = obj->dev; DRM_LOCK(dev); drm_gem_free_mmap_offset(obj); i915_gem_release_mmap(to_intel_bo(obj)); drm_gem_object_unreference(obj); DRM_UNLOCK(dev); } struct cdev_pager_ops i915_gem_pager_ops = { .cdev_pg_fault = i915_gem_pager_fault, .cdev_pg_ctor = i915_gem_pager_ctor, .cdev_pg_dtor = i915_gem_pager_dtor }; int i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset) { struct drm_i915_private *dev_priv; struct drm_i915_gem_object *obj; int ret; - if (!(dev->driver->driver_features & DRIVER_GEM)) - return (-ENODEV); - dev_priv = dev->dev_private; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) return (ret); obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } if (obj->base.size > dev_priv->mm.gtt_mappable_end) { ret = -E2BIG; goto out; } if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); ret = -EINVAL; goto out; } ret = drm_gem_create_mmap_offset(&obj->base); if (ret != 0) goto out; *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) | DRM_GEM_MAPPING_KEY; out: drm_gem_object_unreference(&obj->base); unlock: DRM_UNLOCK(dev); return (ret); } int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *dev_priv; struct drm_i915_gem_mmap_gtt *args; dev_priv = dev->dev_private; args = data; return (i915_gem_mmap_gtt(file, dev, args->handle, &args->offset)); } struct drm_i915_gem_object * i915_gem_alloc_object(struct drm_device *dev, size_t size) { struct drm_i915_private *dev_priv; struct drm_i915_gem_object *obj; dev_priv = dev->dev_private; obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO); if (drm_gem_object_init(dev, &obj->base, size) != 0) { free(obj, DRM_I915_GEM); return (NULL); } obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; if (HAS_LLC(dev)) obj->cache_level = I915_CACHE_LLC; else obj->cache_level = I915_CACHE_NONE; obj->base.driver_private = NULL; obj->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj->mm_list); INIT_LIST_HEAD(&obj->gtt_list); INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->exec_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; /* Avoid an unnecessary call to unbind on the first bind. */ obj->map_and_fenceable = true; i915_gem_info_add_obj(dev_priv, size); return (obj); } void i915_gem_clflush_object(struct drm_i915_gem_object *obj) { /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. */ if (obj->pages == NULL) return; /* If the GPU is snooping the contents of the CPU cache, * we do not need to manually clear the CPU cache lines. However, * the caches are only snooped when the render cache is * flushed/invalidated. As we always have to emit invalidations * and flushes when moving into and out of the RENDER domain, correct * snooping behaviour occurs naturally as the result of our domain * tracking. 
*/ if (obj->cache_level != I915_CACHE_NONE) return; CTR1(KTR_DRM, "object_clflush %p", obj); drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); } static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) { uint32_t old_write_domain; if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; i915_gem_clflush_object(obj); intel_gtt_chipset_flush(); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; CTR3(KTR_DRM, "object_change_domain flush_cpu_write %p %x %x", obj, obj->base.read_domains, old_write_domain); } static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) { if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) return (0); return (i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain)); } static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) { uint32_t old_write_domain; if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) return; wmb(); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; CTR3(KTR_DRM, "object_change_domain flush gtt_write %p %x %x", obj, obj->base.read_domains, old_write_domain); } int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { + drm_i915_private_t *dev_priv = obj->base.dev->dev_private; uint32_t old_write_domain, old_read_domains; int ret; if (obj->gtt_space == NULL) return (-EINVAL); if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) return 0; ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret != 0) return (ret); if (obj->pending_gpu_write || write) { ret = i915_gem_object_wait_rendering(obj); if (ret != 0) return (ret); } i915_gem_object_flush_cpu_write_domain(obj); old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0, ("In GTT write domain")); obj->base.read_domains |= I915_GEM_DOMAIN_GTT; if (write) { obj->base.read_domains = I915_GEM_DOMAIN_GTT; obj->base.write_domain = I915_GEM_DOMAIN_GTT; obj->dirty = 1; } + /* And bump the LRU for this access */ + if (i915_gem_object_is_inactive(obj)) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj, old_read_domains, old_write_domain); return (0); } int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct drm_device *dev; drm_i915_private_t *dev_priv; int ret; if (obj->cache_level == cache_level) return 0; if (obj->pin_count) { DRM_DEBUG("can not change the cache level of pinned objects\n"); return (-EBUSY); } dev = obj->base.dev; dev_priv = dev->dev_private; if (obj->gtt_space) { ret = i915_gem_object_finish_gpu(obj); if (ret != 0) return (ret); i915_gem_object_finish_gtt(obj); /* Before SandyBridge, you could not use tiling or fence * registers with snooped memory, so relinquish any fences * currently pointing to our region in the aperture. 
*/ if (INTEL_INFO(obj->base.dev)->gen < 6) { ret = i915_gem_object_put_fence(obj); if (ret != 0) return (ret); } - i915_gem_gtt_rebind_object(obj, cache_level); + if (obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(obj, cache_level); if (obj->has_aliasing_ppgtt_mapping) i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); } if (cache_level == I915_CACHE_NONE) { u32 old_read_domains, old_write_domain; /* If we're coming from LLC cached, then we haven't * actually been tracking whether the data is in the * CPU cache or not, since we only allow one bit set * in obj->write_domain and have been skipping the clflushes. * Just set it to the CPU cache for now. */ KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0, ("obj %p in CPU write domain", obj)); KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0, ("obj %p in CPU read domain", obj)); old_read_domains = obj->base.read_domains; old_write_domain = obj->base.write_domain; obj->base.read_domains = I915_GEM_DOMAIN_CPU; obj->base.write_domain = I915_GEM_DOMAIN_CPU; CTR3(KTR_DRM, "object_change_domain set_cache_level %p %x %x", obj, old_read_domains, old_write_domain); } obj->cache_level = cache_level; return (0); } +static bool is_pin_display(struct drm_i915_gem_object *obj) +{ + /* There are 3 sources that pin objects: + * 1. The display engine (scanouts, sprites, cursors); + * 2. Reservations for execbuffer; + * 3. The user. + * + * We can ignore reservations as we hold the struct_mutex and + * are only called outside of the reservation path. The user + * can only increment pin_count once, and so if after + * subtracting the potential reference by the user, any pin_count + * remains, it must be due to another use by the display engine. + */ + return obj->pin_count - !!obj->user_pin_count; +} + int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined) { u32 old_read_domains, old_write_domain; int ret; ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret != 0) return (ret); if (pipelined != obj->ring) { - ret = i915_gem_object_wait_rendering(obj); - if (ret == -ERESTART || ret == -EINTR) + ret = i915_gem_object_sync(obj, pipelined); + if (ret) return (ret); } + obj->pin_display = true; ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); if (ret != 0) - return (ret); + goto err_unpin_display; ret = i915_gem_object_pin(obj, alignment, true); if (ret != 0) - return (ret); + goto err_unpin_display; i915_gem_object_flush_cpu_write_domain(obj); old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0, ("obj %p in GTT write domain", obj)); obj->base.read_domains |= I915_GEM_DOMAIN_GTT; CTR3(KTR_DRM, "object_change_domain pin_to_display_plan %p %x %x", obj, old_read_domains, obj->base.write_domain); return (0); + +err_unpin_display: + obj->pin_display = is_pin_display(obj); + return ret; } +void +i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin(obj); + obj->pin_display = is_pin_display(obj); +} + int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) { int ret; if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) return (0); if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); if (ret != 0) return (ret); } ret = i915_gem_object_wait_rendering(obj); if (ret != 0) return (ret); obj->base.read_domains &= 
~I915_GEM_GPU_DOMAINS; return (0); } -static int +int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) { uint32_t old_write_domain, old_read_domains; int ret; if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) return 0; ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret != 0) return (ret); - ret = i915_gem_object_wait_rendering(obj); - if (ret != 0) - return (ret); + if (write || obj->pending_gpu_write) { + ret = i915_gem_object_wait_rendering(obj); + if (ret != 0) + return (ret); + } i915_gem_object_flush_gtt_write_domain(obj); - i915_gem_object_set_to_full_cpu_read_domain(obj); old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { i915_gem_clflush_object(obj); obj->base.read_domains |= I915_GEM_DOMAIN_CPU; } KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0, ("In cpu write domain")); if (write) { obj->base.read_domains = I915_GEM_DOMAIN_CPU; obj->base.write_domain = I915_GEM_DOMAIN_CPU; } CTR3(KTR_DRM, "object_change_domain set_to_cpu %p %x %x", obj, old_read_domains, old_write_domain); return (0); } -static void -i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) -{ - int i; - - if (obj->page_cpu_valid == NULL) - return; - - if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) { - for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { - if (obj->page_cpu_valid[i] != 0) - continue; - drm_clflush_pages(obj->pages + i, 1); - } - } - - free(obj->page_cpu_valid, DRM_I915_GEM); - obj->page_cpu_valid = NULL; -} - -static int -i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, - uint64_t offset, uint64_t size) -{ - uint32_t old_read_domains; - int i, ret; - - if (offset == 0 && size == obj->base.size) - return (i915_gem_object_set_to_cpu_domain(obj, 0)); - - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret != 0) - return (ret); - ret = i915_gem_object_wait_rendering(obj); - if (ret != 0) - return (ret); - - i915_gem_object_flush_gtt_write_domain(obj); - - if (obj->page_cpu_valid == NULL && - (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) - return (0); - - if (obj->page_cpu_valid == NULL) { - obj->page_cpu_valid = malloc(obj->base.size / PAGE_SIZE, - DRM_I915_GEM, M_WAITOK | M_ZERO); - } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) - memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); - - for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; - i++) { - if (obj->page_cpu_valid[i]) - continue; - drm_clflush_pages(obj->pages + i, 1); - obj->page_cpu_valid[i] = 1; - } - - KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0, - ("In gpu write domain")); - - old_read_domains = obj->base.read_domains; - obj->base.read_domains |= I915_GEM_DOMAIN_CPU; - - CTR3(KTR_DRM, "object_change_domain set_cpu_read %p %x %x", obj, - old_read_domains, obj->base.write_domain); - return (0); -} - static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) { uint32_t gtt_size; if (INTEL_INFO(dev)->gen >= 4 || tiling_mode == I915_TILING_NONE) return (size); /* Previous chips need a power-of-two fence region when tiling */ if (INTEL_INFO(dev)->gen == 3) gtt_size = 1024*1024; else gtt_size = 512*1024; while (gtt_size < size) gtt_size <<= 1; return (gtt_size); } /** * i915_gem_get_gtt_alignment - return required GTT alignment for an object * @obj: object to check * * Return the required GTT alignment for an object, taking into account 
* potential fence register mapping. */ static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, int tiling_mode) { /* * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. */ if (INTEL_INFO(dev)->gen >= 4 || tiling_mode == I915_TILING_NONE) return (4096); /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ return (i915_gem_get_gtt_size(dev, size, tiling_mode)); } uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, uint32_t size, int tiling_mode) { if (tiling_mode == I915_TILING_NONE) return (4096); /* * Minimum alignment is 4k (GTT page size) for sane hw. */ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev)) return (4096); /* * Previous hardware however needs to be aligned to a power-of-two * tile height. The simplest method for determining this is to reuse * the power-of-tile object size. */ return (i915_gem_get_gtt_size(dev, size, tiling_mode)); } static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable) { struct drm_device *dev; struct drm_i915_private *dev_priv; struct drm_mm_node *free_space; uint32_t size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; int ret; dev = obj->base.dev; dev_priv = dev->dev_private; if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to bind a purgeable object\n"); return (-EINVAL); } fence_size = i915_gem_get_gtt_size(dev, obj->base.size, obj->tiling_mode); fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size, obj->tiling_mode); unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev, obj->base.size, obj->tiling_mode); if (alignment == 0) alignment = map_and_fenceable ? fence_alignment : unfenced_alignment; if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) { DRM_ERROR("Invalid object alignment requested %u\n", alignment); return (-EINVAL); } size = map_and_fenceable ? fence_size : obj->base.size; /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ if (obj->base.size > (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { DRM_ERROR( "Attempting to bind an object larger than the aperture\n"); return (-E2BIG); } search_free: if (map_and_fenceable) free_space = drm_mm_search_free_in_range( &dev_priv->mm.gtt_space, size, alignment, 0, dev_priv->mm.gtt_mappable_end, 0); else free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, size, alignment, 0); if (free_space != NULL) { if (map_and_fenceable) obj->gtt_space = drm_mm_get_block_range_generic( free_space, size, alignment, 0, dev_priv->mm.gtt_mappable_end, 1); else obj->gtt_space = drm_mm_get_block_generic(free_space, size, alignment, 1); } if (obj->gtt_space == NULL) { ret = i915_gem_evict_something(dev, size, alignment, map_and_fenceable); if (ret != 0) return (ret); goto search_free; } ret = i915_gem_object_get_pages_gtt(obj, 0); if (ret != 0) { drm_mm_put_block(obj->gtt_space); obj->gtt_space = NULL; /* * i915_gem_object_get_pages_gtt() cannot return * ENOMEM, since we use vm_page_grab(). 
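 * Illustrative sketch of why (see i915_gem_wire_page() below): each page
 * is obtained roughly as
 *
 *   m = vm_page_grab(vm_obj, i, VM_ALLOC_NORMAL);     sleeps, never NULL
 *   if (m->valid != VM_PAGE_BITS_ALL)
 *           rv = vm_pager_get_pages(vm_obj, &m, 1, 0);   can fail -> EIO
 *   vm_page_wire(m);
 *
 * so allocation itself never fails; only pager I/O errors are reported.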
*/ return (ret); } - ret = i915_gem_gtt_bind_object(obj); + ret = i915_gem_gtt_prepare_object(obj); if (ret != 0) { i915_gem_object_put_pages_gtt(obj); drm_mm_put_block(obj->gtt_space); obj->gtt_space = NULL; if (i915_gem_evict_everything(dev, false)) return (ret); goto search_free; } + if (!dev_priv->mm.aliasing_ppgtt) + i915_gem_gtt_bind_object(obj, obj->cache_level); + list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); KASSERT((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0, ("Object in gpu read domain")); KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0, ("Object in gpu write domain")); obj->gtt_offset = obj->gtt_space->start; fenceable = obj->gtt_space->size == fence_size && (obj->gtt_space->start & (fence_alignment - 1)) == 0; mappable = obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; obj->map_and_fenceable = mappable && fenceable; CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset, obj->base.size, map_and_fenceable); return (0); } -static void -i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) +int +i915_gem_object_sync(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to) { + struct intel_ring_buffer *from = obj->ring; + u32 seqno; + int ret, idx; + + if (from == NULL || to == from) + return 0; + + if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) + return i915_gem_object_wait_rendering(obj); + + idx = intel_ring_sync_index(from, to); + + seqno = obj->last_rendering_seqno; + if (seqno <= from->sync_seqno[idx]) + return 0; + + if (seqno == from->outstanding_lazy_request) { + struct drm_i915_gem_request *request; + + request = malloc(sizeof(*request), DRM_I915_GEM, + M_WAITOK | M_ZERO); + ret = i915_add_request(from, NULL, request); + if (ret) { + free(request, DRM_I915_GEM); + return ret; + } + seqno = request->seqno; + } + + + ret = to->sync_to(to, from, seqno); + if (!ret) + from->sync_seqno[idx] = seqno; + + return ret; +} + +static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) +{ u32 old_write_domain, old_read_domains; /* Act a barrier for all accesses through the GTT */ mb(); /* Force a pagefault for domain tracking on next user access */ i915_gem_release_mmap(obj); if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) return; old_read_domains = obj->base.read_domains; old_write_domain = obj->base.write_domain; obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; CTR3(KTR_DRM, "object_change_domain finish gtt %p %x %x", obj, old_read_domains, old_write_domain); } int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv; int ret; dev_priv = obj->base.dev->dev_private; ret = 0; if (obj->gtt_space == NULL) return (0); if (obj->pin_count != 0) { DRM_ERROR("Attempting to unbind pinned buffer\n"); return (-EINVAL); } ret = i915_gem_object_finish_gpu(obj); if (ret == -ERESTART || ret == -EINTR) return (ret); i915_gem_object_finish_gtt(obj); if (ret == 0) ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret == -ERESTART || ret == -EINTR) return (ret); if (ret != 0) { i915_gem_clflush_object(obj); obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } ret = i915_gem_object_put_fence(obj); - if (ret == -ERESTART) + if (ret) return (ret); - i915_gem_gtt_unbind_object(obj); + if (obj->has_global_gtt_mapping) + i915_gem_gtt_unbind_object(obj); if (obj->has_aliasing_ppgtt_mapping) { 
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); obj->has_aliasing_ppgtt_mapping = 0; } + i915_gem_gtt_finish_object(obj); + i915_gem_object_put_pages_gtt(obj); list_del_init(&obj->gtt_list); list_del_init(&obj->mm_list); obj->map_and_fenceable = true; drm_mm_put_block(obj->gtt_space); obj->gtt_space = NULL; obj->gtt_offset = 0; if (i915_gem_object_is_purgeable(obj)) i915_gem_object_truncate(obj); CTR1(KTR_DRM, "object_unbind %p", obj); return (ret); } +static void +i915_gem_object_put_pages_range_locked(struct drm_i915_gem_object *obj, + vm_pindex_t si, vm_pindex_t ei) +{ + vm_object_t vm_obj; + vm_page_t m; + vm_pindex_t i; + + vm_obj = obj->base.vm_obj; + VM_OBJECT_ASSERT_LOCKED(vm_obj); + for (i = si, m = vm_page_lookup(vm_obj, i); i < ei; + m = vm_page_next(m), i++) { + KASSERT(m->pindex == i, ("pindex %jx %jx", + (uintmax_t)m->pindex, (uintmax_t)i)); + vm_page_lock(m); + vm_page_unwire(m, PQ_INACTIVE); + if (m->wire_count == 0) + atomic_add_long(&i915_gem_wired_pages_cnt, -1); + vm_page_unlock(m); + } +} + +static void +i915_gem_object_put_pages_range(struct drm_i915_gem_object *obj, + off_t start, off_t end) +{ + vm_object_t vm_obj; + + vm_obj = obj->base.vm_obj; + VM_OBJECT_WLOCK(vm_obj); + i915_gem_object_put_pages_range_locked(obj, + OFF_TO_IDX(trunc_page(start)), OFF_TO_IDX(round_page(end))); + VM_OBJECT_WUNLOCK(vm_obj); +} + static int +i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj, + off_t start, off_t end) +{ + vm_object_t vm_obj; + vm_page_t m; + vm_pindex_t si, ei, i; + bool need_swizzle, fresh; + + need_swizzle = i915_gem_object_needs_bit17_swizzle(obj) != 0; + vm_obj = obj->base.vm_obj; + si = OFF_TO_IDX(trunc_page(start)); + ei = OFF_TO_IDX(round_page(end)); + VM_OBJECT_WLOCK(vm_obj); + for (i = si; i < ei; i++) { + m = i915_gem_wire_page(vm_obj, i, &fresh); + if (m == NULL) + goto failed; + if (need_swizzle && fresh) + i915_gem_object_do_bit_17_swizzle_page(obj, m); + } + VM_OBJECT_WUNLOCK(vm_obj); + return (0); +failed: + i915_gem_object_put_pages_range_locked(obj, si, i); + VM_OBJECT_WUNLOCK(vm_obj); + return (-EIO); +} + +static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, int flags) { struct drm_device *dev; vm_object_t vm_obj; vm_page_t m; - int page_count, i, j; + vm_pindex_t i, page_count; + int res; dev = obj->base.dev; KASSERT(obj->pages == NULL, ("Obj already has pages")); - page_count = obj->base.size / PAGE_SIZE; + page_count = OFF_TO_IDX(obj->base.size); obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM, M_WAITOK); + res = i915_gem_object_get_pages_range(obj, 0, obj->base.size); + if (res != 0) { + free(obj->pages, DRM_I915_GEM); + obj->pages = NULL; + return (res); + } vm_obj = obj->base.vm_obj; VM_OBJECT_WLOCK(vm_obj); - for (i = 0; i < page_count; i++) { - if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL) - goto failed; + for (i = 0, m = vm_page_lookup(vm_obj, 0); i < page_count; + i++, m = vm_page_next(m)) { + KASSERT(m->pindex == i, ("pindex %jx %jx", + (uintmax_t)m->pindex, (uintmax_t)i)); + obj->pages[i] = m; } VM_OBJECT_WUNLOCK(vm_obj); - if (i915_gem_object_needs_bit17_swizzle(obj)) - i915_gem_object_do_bit_17_swizzle(obj); return (0); - -failed: - for (j = 0; j < i; j++) { - m = obj->pages[j]; - vm_page_lock(m); - vm_page_unwire(m, PQ_INACTIVE); - vm_page_unlock(m); - atomic_add_long(&i915_gem_wired_pages_cnt, -1); - } - VM_OBJECT_WUNLOCK(vm_obj); - free(obj->pages, DRM_I915_GEM); - obj->pages = NULL; - return (-EIO); } #define GEM_PARANOID_CHECK_GTT 0 #if 
GEM_PARANOID_CHECK_GTT static void i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma, int page_count) { struct drm_i915_private *dev_priv; vm_paddr_t pa; unsigned long start, end; u_int i; int j; dev_priv = dev->dev_private; start = OFF_TO_IDX(dev_priv->mm.gtt_start); end = OFF_TO_IDX(dev_priv->mm.gtt_end); for (i = start; i < end; i++) { pa = intel_gtt_read_pte_paddr(i); for (j = 0; j < page_count; j++) { if (pa == VM_PAGE_TO_PHYS(ma[j])) { panic("Page %p in GTT pte index %d pte %x", ma[i], i, intel_gtt_read_pte(i)); } } } } #endif static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { vm_page_t m; int page_count, i; KASSERT(obj->madv != I915_MADV_PURGED_INTERNAL, ("Purged object")); if (obj->tiling_mode != I915_TILING_NONE) i915_gem_object_save_bit_17_swizzle(obj); if (obj->madv == I915_MADV_DONTNEED) obj->dirty = 0; page_count = obj->base.size / PAGE_SIZE; VM_OBJECT_WLOCK(obj->base.vm_obj); #if GEM_PARANOID_CHECK_GTT i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count); #endif for (i = 0; i < page_count; i++) { m = obj->pages[i]; if (obj->dirty) vm_page_dirty(m); if (obj->madv == I915_MADV_WILLNEED) vm_page_reference(m); vm_page_lock(m); vm_page_unwire(obj->pages[i], PQ_ACTIVE); vm_page_unlock(m); atomic_add_long(&i915_gem_wired_pages_cnt, -1); } VM_OBJECT_WUNLOCK(obj->base.vm_obj); obj->dirty = 0; free(obj->pages, DRM_I915_GEM); obj->pages = NULL; } void i915_gem_release_mmap(struct drm_i915_gem_object *obj) { vm_object_t devobj; vm_page_t m; int i, page_count; if (!obj->fault_mappable) return; CTR3(KTR_DRM, "release_mmap %p %x %x", obj, obj->gtt_offset, OFF_TO_IDX(obj->base.size)); devobj = cdev_pager_lookup(obj); if (devobj != NULL) { page_count = OFF_TO_IDX(obj->base.size); VM_OBJECT_WLOCK(devobj); retry: for (i = 0; i < page_count; i++) { m = vm_page_lookup(devobj, i); if (m == NULL) continue; if (vm_page_sleep_if_busy(m, "915unm")) goto retry; cdev_pager_free_page(devobj, m); } VM_OBJECT_WUNLOCK(devobj); vm_object_deallocate(devobj); } obj->fault_mappable = false; } int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) { int ret; KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0, ("In GPU write domain")); CTR5(KTR_DRM, "object_wait_rendering %p %s %x %d %d", obj, obj->ring != NULL ? obj->ring->name : "none", obj->gtt_offset, obj->active, obj->last_rendering_seqno); if (obj->active) { - ret = i915_wait_request(obj->ring, obj->last_rendering_seqno, - true); + ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); if (ret != 0) return (ret); + i915_gem_retire_requests_ring(obj->ring); } return (0); } void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, uint32_t seqno) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_fence_reg *reg; obj->ring = ring; KASSERT(ring != NULL, ("NULL ring")); /* Add a reference if we're newly entering the active list. */ if (!obj->active) { drm_gem_object_reference(&obj->base); obj->active = 1; } /* Move from whatever list we were on to the tail of execution. 
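 * A rough sketch of the mm_list transitions involved (illustration only,
 * matching move_to_flushing()/move_to_inactive() further down):
 *
 *   any list      -> active_list        this function
 *   active_list   -> flushing_list      retired with a write domain set
 *   active/flush  -> inactive_list      retired and idle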
*/ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); list_move_tail(&obj->ring_list, &ring->active_list); obj->last_rendering_seqno = seqno; if (obj->fenced_gpu_access) { obj->last_fenced_seqno = seqno; - obj->last_fenced_ring = ring; /* Bump MRU to take account of the delayed flush */ if (obj->fence_reg != I915_FENCE_REG_NONE) { reg = &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(®->lru_list, &dev_priv->mm.fence_list); } } } static void i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) { list_del_init(&obj->ring_list); obj->last_rendering_seqno = 0; obj->last_fenced_seqno = 0; } static void i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; KASSERT(obj->active, ("Object not active")); list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); i915_gem_object_move_off_active(obj); } static void i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (obj->pin_count != 0) - list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); - else - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); KASSERT(list_empty(&obj->gpu_write_list), ("On gpu_write_list")); KASSERT(obj->active, ("Object not active")); obj->ring = NULL; - obj->last_fenced_ring = NULL; i915_gem_object_move_off_active(obj); obj->fenced_gpu_access = false; obj->active = 0; obj->pending_gpu_write = false; drm_gem_object_unreference(&obj->base); #if 1 KIB_NOTYET(); #else WARN_ON(i915_verify_lists(dev)); #endif } static void i915_gem_object_truncate(struct drm_i915_gem_object *obj) { vm_object_t vm_obj; vm_obj = obj->base.vm_obj; VM_OBJECT_WLOCK(vm_obj); vm_object_page_remove(vm_obj, 0, 0, false); VM_OBJECT_WUNLOCK(vm_obj); + drm_gem_free_mmap_offset(&obj->base); obj->madv = I915_MADV_PURGED_INTERNAL; } static inline int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) { return (obj->madv == I915_MADV_DONTNEED); } static void i915_gem_process_flushing_list(struct intel_ring_buffer *ring, uint32_t flush_domains) { struct drm_i915_gem_object *obj, *next; uint32_t old_write_domain; list_for_each_entry_safe(obj, next, &ring->gpu_write_list, gpu_write_list) { if (obj->base.write_domain & flush_domains) { old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_active(obj, ring, i915_gem_next_request_seqno(ring)); CTR3(KTR_DRM, "object_change_domain process_flush %p %x %x", obj, obj->base.read_domains, old_write_domain); } } } static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv; dev_priv = obj->base.dev->dev_private; return (dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && obj->tiling_mode != I915_TILING_NONE); } static vm_page_t -i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex) +i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex, bool *fresh) { vm_page_t m; int rv; VM_OBJECT_ASSERT_WLOCKED(object); m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); if (m->valid != VM_PAGE_BITS_ALL) { if (vm_pager_has_page(object, pindex, NULL, NULL)) { rv = vm_pager_get_pages(object, &m, 1, 0); m = vm_page_lookup(object, pindex); if (m == NULL) return (NULL); if (rv != VM_PAGER_OK) { vm_page_lock(m); vm_page_free(m); vm_page_unlock(m); return (NULL); } + if (fresh != 
NULL) + *fresh = true; } else { pmap_zero_page(m); m->valid = VM_PAGE_BITS_ALL; m->dirty = 0; + if (fresh != NULL) + *fresh = false; } + } else if (fresh != NULL) { + *fresh = false; } vm_page_lock(m); vm_page_wire(m); vm_page_unlock(m); vm_page_xunbusy(m); atomic_add_long(&i915_gem_wired_pages_cnt, 1); return (m); } int i915_gem_flush_ring(struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains) { int ret; if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) return 0; CTR3(KTR_DRM, "ring_flush %s %x %x", ring->name, invalidate_domains, flush_domains); ret = ring->flush(ring, invalidate_domains, flush_domains); if (ret) return ret; if (flush_domains & I915_GEM_GPU_DOMAINS) i915_gem_process_flushing_list(ring, flush_domains); return 0; } static int -i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire) +i915_ring_idle(struct intel_ring_buffer *ring) { int ret; if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) return 0; if (!list_empty(&ring->gpu_write_list)) { ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); if (ret != 0) return ret; } - return (i915_wait_request(ring, i915_gem_next_request_seqno(ring), - do_retire)); + return (i915_wait_request(ring, i915_gem_next_request_seqno(ring))); } int -i915_gpu_idle(struct drm_device *dev, bool do_retire) +i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; int ret, i; /* Flush everything onto the inactive list. */ for_each_ring(ring, dev_priv, i) { ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); if (ret) return ret; - ret = i915_ring_idle(ring, do_retire); + ret = i915_ring_idle(ring); if (ret) return ret; + + /* Is the device fubar? */ + if (!list_empty(&ring->gpu_write_list)) + return -EBUSY; } return 0; } -int -i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno, bool do_retire) +static int +i915_gem_check_wedge(struct drm_i915_private *dev_priv) { - drm_i915_private_t *dev_priv; - struct drm_i915_gem_request *request; - uint32_t ier; - int flags, ret; - bool recovery_complete; + DRM_LOCK_ASSERT(dev_priv->dev); - KASSERT(seqno != 0, ("Zero seqno")); - - dev_priv = ring->dev->dev_private; - ret = 0; - if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) { + bool recovery_complete; /* Give the error handler a chance to run. */ mtx_lock(&dev_priv->error_completion_lock); recovery_complete = (&dev_priv->error_completion) > 0; mtx_unlock(&dev_priv->error_completion_lock); return (recovery_complete ? -EIO : -EAGAIN); } + return 0; +} + +/* + * Compare seqno against outstanding lazy request. Emit a request if they are + * equal. 
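+ * A minimal sketch of how the rest of the wait path uses this helper
+ * (it mirrors i915_wait_request() further down; the DRM lock is held):
+ *
+ *   ret = i915_gem_check_wedge(dev_priv);
+ *   if (ret == 0)
+ *           ret = i915_gem_check_olr(ring, seqno);
+ *   if (ret == 0)
+ *           ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);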
+ */ +static int +i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) +{ + int ret = 0; + + DRM_LOCK_ASSERT(ring->dev); + if (seqno == ring->outstanding_lazy_request) { + struct drm_i915_gem_request *request; + request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO); - if (request == NULL) - return (-ENOMEM); ret = i915_add_request(ring, NULL, request); if (ret != 0) { free(request, DRM_I915_GEM); return (ret); } - seqno = request->seqno; + MPASS(seqno == request->seqno); } + return ret; +} - if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { - if (HAS_PCH_SPLIT(ring->dev)) - ier = I915_READ(DEIER) | I915_READ(GTIER); - else - ier = I915_READ(IER); - if (!ier) { - DRM_ERROR("something (likely vbetool) disabled " - "interrupts, re-enabling\n"); - ring->dev->driver->irq_preinstall(ring->dev); - ring->dev->driver->irq_postinstall(ring->dev); - } +static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, + bool interruptible) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + int ret = 0, flags; - CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno); + if (i915_seqno_passed(ring->get_seqno(ring), seqno)) + return 0; - ring->waiting_seqno = seqno; - mtx_lock(&ring->irq_lock); - if (ring->irq_get(ring)) { - flags = dev_priv->mm.interruptible ? PCATCH : 0; - while (!i915_seqno_passed(ring->get_seqno(ring), seqno) - && !atomic_load_acq_int(&dev_priv->mm.wedged) && - ret == 0) { - ret = -msleep(ring, &ring->irq_lock, flags, - "915gwr", 0); - } - ring->irq_put(ring); - mtx_unlock(&ring->irq_lock); - } else { - mtx_unlock(&ring->irq_lock); - if (_intel_wait_for(ring->dev, - i915_seqno_passed(ring->get_seqno(ring), seqno) || - atomic_load_acq_int(&dev_priv->mm.wedged), 3000, - 0, "i915wrq") != 0) - ret = -EBUSY; - } - ring->waiting_seqno = 0; + CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno); - CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, - ret); + mtx_lock(&dev_priv->irq_lock); + if (!ring->irq_get(ring)) { + mtx_unlock(&dev_priv->irq_lock); + return (-ENODEV); } + + flags = interruptible ? PCATCH : 0; + while (!i915_seqno_passed(ring->get_seqno(ring), seqno) + && !atomic_load_acq_int(&dev_priv->mm.wedged) && + ret == 0) + ret = -msleep(ring, &dev_priv->irq_lock, flags, "915gwr", 0); + ring->irq_put(ring); + mtx_unlock(&dev_priv->irq_lock); + + CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, ret); + + return ret; +} + +int +i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno) +{ + drm_i915_private_t *dev_priv; + int ret; + + KASSERT(seqno != 0, ("Zero seqno")); + + dev_priv = ring->dev->dev_private; + ret = 0; + + ret = i915_gem_check_wedge(dev_priv); + if (ret) + return ret; + + ret = i915_gem_check_olr(ring, seqno); + if (ret) + return ret; + + ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible); if (atomic_load_acq_int(&dev_priv->mm.wedged)) ret = -EAGAIN; - /* Directly dispatch request retiring. While we have the work queue - * to handle this, the waiter on a request often wants an associated - * buffer to have made it to the inactive list, and we would need - * a separate wait queue to handle that. 
- */ - if (ret == 0 && do_retire) - i915_gem_retire_requests_ring(ring); - return (ret); } static u32 i915_gem_get_seqno(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 seqno = dev_priv->next_seqno; /* reserve 0 for non-seqno */ if (++dev_priv->next_seqno == 0) dev_priv->next_seqno = 1; return seqno; } u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring) { if (ring->outstanding_lazy_request == 0) ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev); return ring->outstanding_lazy_request; } int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, struct drm_i915_gem_request *request) { drm_i915_private_t *dev_priv; struct drm_i915_file_private *file_priv; uint32_t seqno; u32 request_ring_position; int was_empty; int ret; KASSERT(request != NULL, ("NULL request in add")); DRM_LOCK_ASSERT(ring->dev); dev_priv = ring->dev->dev_private; seqno = i915_gem_next_request_seqno(ring); request_ring_position = intel_ring_get_tail(ring); ret = ring->add_request(ring, &seqno); if (ret != 0) return ret; CTR2(KTR_DRM, "request_add %s %d", ring->name, seqno); request->seqno = seqno; request->ring = ring; request->tail = request_ring_position; request->emitted_jiffies = ticks; was_empty = list_empty(&ring->request_list); list_add_tail(&request->list, &ring->request_list); if (file != NULL) { file_priv = file->driver_priv; mtx_lock(&file_priv->mm.lck); request->file_priv = file_priv; list_add_tail(&request->client_list, &file_priv->mm.request_list); mtx_unlock(&file_priv->mm.lck); } ring->outstanding_lazy_request = 0; if (!dev_priv->mm.suspended) { if (i915_enable_hangcheck) { callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD); } if (was_empty) taskqueue_enqueue_timeout(dev_priv->tq, &dev_priv->mm.retire_task, hz); } return (0); } static inline void i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) { struct drm_i915_file_private *file_priv = request->file_priv; if (!file_priv) return; DRM_LOCK_ASSERT(request->ring->dev); mtx_lock(&file_priv->mm.lck); if (request->file_priv != NULL) { list_del(&request->client_list); request->file_priv = NULL; } mtx_unlock(&file_priv->mm.lck); } void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv; struct drm_i915_gem_request *request; file_priv = file->driver_priv; /* Clean up our request list when the client is going away, so that * later retire_requests won't dereference our soon-to-be-gone * file_priv. 
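 * Note that only the client_list linkage is cut here; the requests stay
 * on their ring's request_list and are freed by the normal retire path,
 * roughly
 *
 *   i915_gem_retire_requests_ring(ring)
 *       list_del(&request->list);
 *       free(request, DRM_I915_GEM);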
*/ mtx_lock(&file_priv->mm.lck); while (!list_empty(&file_priv->mm.request_list)) { request = list_first_entry(&file_priv->mm.request_list, struct drm_i915_gem_request, client_list); list_del(&request->client_list); request->file_priv = NULL; } mtx_unlock(&file_priv->mm.lck); } static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, struct intel_ring_buffer *ring) { if (ring->dev != NULL) DRM_LOCK_ASSERT(ring->dev); while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; request = list_first_entry(&ring->request_list, struct drm_i915_gem_request, list); list_del(&request->list); i915_gem_request_remove_from_client(request); free(request, DRM_I915_GEM); } while (!list_empty(&ring->active_list)) { struct drm_i915_gem_object *obj; obj = list_first_entry(&ring->active_list, struct drm_i915_gem_object, ring_list); obj->base.write_domain = 0; list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_inactive(obj); } } static void i915_gem_reset_fences(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; - struct drm_i915_gem_object *obj = reg->obj; - if (!obj) - continue; + i915_gem_write_fence(dev, i, NULL); - if (obj->tiling_mode) - i915_gem_release_mmap(obj); + if (reg->obj) + i915_gem_object_fence_lost(reg->obj); - reg->obj->fence_reg = I915_FENCE_REG_NONE; - reg->obj->fenced_gpu_access = false; - reg->obj->last_fenced_seqno = 0; - reg->obj->last_fenced_ring = NULL; - i915_gem_clear_fence_reg(dev, reg); + reg->pin_count = 0; + reg->obj = NULL; + INIT_LIST_HEAD(®->lru_list); } + + INIT_LIST_HEAD(&dev_priv->mm.fence_list); } void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; + struct intel_ring_buffer *ring; int i; - for (i = 0; i < I915_NUM_RINGS; i++) - i915_gem_reset_ring_lists(dev_priv, &dev_priv->rings[i]); + for_each_ring(ring, dev_priv, i) + i915_gem_reset_ring_lists(dev_priv, ring); /* Remove anything from the flushing lists. The GPU cache is likely * to be lost on reset along with the data, so simply move the * lost bo to the inactive list. */ while (!list_empty(&dev_priv->mm.flushing_list)) { obj = list_first_entry(&dev_priv->mm.flushing_list, struct drm_i915_gem_object, mm_list); obj->base.write_domain = 0; list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_inactive(obj); } /* Move everything out of the GPU domains to ensure we do any * necessary invalidation upon reuse. */ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } /* The fence registers are invalidated so clear them out */ i915_gem_reset_fences(dev); } /** * This function clears the request list as sequence numbers are passed. 
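 * "Passed" is the usual wrapping 32-bit comparison; assuming the common
 * helper definition it behaves like
 *
 *   i915_seqno_passed(a, b) == ((int32_t)(a - b) >= 0)
 *
 * so requests retire strictly in submission order even across seqno wrap.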
*/ void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) { uint32_t seqno; int i; if (list_empty(&ring->request_list)) return; seqno = ring->get_seqno(ring); CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno); for (i = 0; i < DRM_ARRAY_SIZE(ring->sync_seqno); i++) if (seqno >= ring->sync_seqno[i]) ring->sync_seqno[i] = 0; while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; request = list_first_entry(&ring->request_list, struct drm_i915_gem_request, list); if (!i915_seqno_passed(seqno, request->seqno)) break; CTR2(KTR_DRM, "retire_request_seqno_passed %s %d", ring->name, seqno); ring->last_retired_head = request->tail; list_del(&request->list); i915_gem_request_remove_from_client(request); free(request, DRM_I915_GEM); } /* Move any buffers on the active list that are no longer referenced * by the ringbuffer to the flushing/inactive lists as appropriate. */ while (!list_empty(&ring->active_list)) { struct drm_i915_gem_object *obj; obj = list_first_entry(&ring->active_list, struct drm_i915_gem_object, ring_list); if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) break; if (obj->base.write_domain != 0) i915_gem_object_move_to_flushing(obj); else i915_gem_object_move_to_inactive(obj); } if (ring->trace_irq_seqno && i915_seqno_passed(seqno, ring->trace_irq_seqno)) { - mtx_lock(&ring->irq_lock); + struct drm_i915_private *dev_priv = ring->dev->dev_private; + mtx_lock(&dev_priv->irq_lock); ring->irq_put(ring); - mtx_unlock(&ring->irq_lock); + mtx_unlock(&dev_priv->irq_lock); ring->trace_irq_seqno = 0; } } void i915_gem_retire_requests(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj, *next; + struct intel_ring_buffer *ring; int i; - if (!list_empty(&dev_priv->mm.deferred_free_list)) { - list_for_each_entry_safe(obj, next, - &dev_priv->mm.deferred_free_list, mm_list) - i915_gem_free_object_tail(obj); - } - - for (i = 0; i < I915_NUM_RINGS; i++) - i915_gem_retire_requests_ring(&dev_priv->rings[i]); + for_each_ring(ring, dev_priv, i) + i915_gem_retire_requests_ring(ring); } -static int -sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +static void sandybridge_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 size = obj->gtt_space->size; - int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj->gtt_offset + size - 4096) & - 0xfffff000) << 32; - val |= obj->gtt_offset & 0xfffff000; - val |= (uint64_t)((obj->stride / 128) - 1) << - SANDYBRIDGE_FENCE_PITCH_SHIFT; + if (obj) { + u32 size = obj->gtt_space->size; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I965_FENCE_TILING_Y_SHIFT; - val |= I965_FENCE_REG_VALID; + val = (uint64_t)((obj->gtt_offset + size - 4096) & + 0xfffff000) << 32; + val |= obj->gtt_offset & 0xfffff000; + val |= (uint64_t)((obj->stride / 128) - 1) << + SANDYBRIDGE_FENCE_PITCH_SHIFT; - if (pipelined) { - int ret = intel_ring_begin(pipelined, 6); - if (ret) - return ret; - - intel_ring_emit(pipelined, MI_NOOP); - intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8); - intel_ring_emit(pipelined, (u32)val); - intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4); - intel_ring_emit(pipelined, (u32)(val >> 32)); - intel_ring_advance(pipelined); + if (obj->tiling_mode == I915_TILING_Y) + val |= 
1 << I965_FENCE_TILING_Y_SHIFT; + val |= I965_FENCE_REG_VALID; } else - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); + val = 0; - return 0; + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val); + POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8); } -static int -i965_write_fence_reg(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +static void i965_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 size = obj->gtt_space->size; - int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj->gtt_offset + size - 4096) & - 0xfffff000) << 32; - val |= obj->gtt_offset & 0xfffff000; - val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I965_FENCE_TILING_Y_SHIFT; - val |= I965_FENCE_REG_VALID; + if (obj) { + u32 size = obj->gtt_space->size; - if (pipelined) { - int ret = intel_ring_begin(pipelined, 6); - if (ret) - return ret; - - intel_ring_emit(pipelined, MI_NOOP); - intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8); - intel_ring_emit(pipelined, (u32)val); - intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4); - intel_ring_emit(pipelined, (u32)(val >> 32)); - intel_ring_advance(pipelined); + val = (uint64_t)((obj->gtt_offset + size - 4096) & + 0xfffff000) << 32; + val |= obj->gtt_offset & 0xfffff000; + val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I965_FENCE_TILING_Y_SHIFT; + val |= I965_FENCE_REG_VALID; } else - I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); + val = 0; - return 0; + I915_WRITE64(FENCE_REG_965_0 + reg * 8, val); + POSTING_READ(FENCE_REG_965_0 + reg * 8); } -static int -i915_write_fence_reg(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +static void i915_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 size = obj->gtt_space->size; - u32 fence_reg, val, pitch_val; - int tile_width; + u32 val; - if ((obj->gtt_offset & ~I915_FENCE_START_MASK) || - (size & -size) != size || (obj->gtt_offset & (size - 1))) { - printf( -"object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", - obj->gtt_offset, obj->map_and_fenceable, size); - return -EINVAL; - } + if (obj) { + u32 size = obj->gtt_space->size; + int pitch_val; + int tile_width; - if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) - tile_width = 128; - else - tile_width = 512; + if ((obj->gtt_offset & ~I915_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1))) + printf( + "object 0x%08x [fenceable? 
%d] not 1M or pot-size (0x%08x) aligned\n", + obj->gtt_offset, obj->map_and_fenceable, size); - /* Note: pitch better be a power of two tile widths */ - pitch_val = obj->stride / tile_width; - pitch_val = ffs(pitch_val) - 1; + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) + tile_width = 128; + else + tile_width = 512; - val = obj->gtt_offset; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I915_FENCE_SIZE_BITS(size); - val |= pitch_val << I830_FENCE_PITCH_SHIFT; - val |= I830_FENCE_REG_VALID; + /* Note: pitch better be a power of two tile widths */ + pitch_val = obj->stride / tile_width; + pitch_val = ffs(pitch_val) - 1; - fence_reg = obj->fence_reg; - if (fence_reg < 8) - fence_reg = FENCE_REG_830_0 + fence_reg * 4; - else - fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; - - if (pipelined) { - int ret = intel_ring_begin(pipelined, 4); - if (ret) - return ret; - - intel_ring_emit(pipelined, MI_NOOP); - intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(pipelined, fence_reg); - intel_ring_emit(pipelined, val); - intel_ring_advance(pipelined); + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; + val |= I915_FENCE_SIZE_BITS(size); + val |= pitch_val << I830_FENCE_PITCH_SHIFT; + val |= I830_FENCE_REG_VALID; } else - I915_WRITE(fence_reg, val); + val = 0; - return 0; + if (reg < 8) + reg = FENCE_REG_830_0 + reg * 4; + else + reg = FENCE_REG_945_8 + (reg - 8) * 4; + + I915_WRITE(reg, val); + POSTING_READ(reg); } -static int -i830_write_fence_reg(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +static void i830_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 size = obj->gtt_space->size; - int regnum = obj->fence_reg; uint32_t val; - uint32_t pitch_val; - if ((obj->gtt_offset & ~I830_FENCE_START_MASK) || - (size & -size) != size || (obj->gtt_offset & (size - 1))) { - printf( -"object 0x%08x not 512K or pot-size 0x%08x aligned\n", - obj->gtt_offset, size); - return -EINVAL; - } + if (obj) { + u32 size = obj->gtt_space->size; + uint32_t pitch_val; - pitch_val = obj->stride / 128; - pitch_val = ffs(pitch_val) - 1; + if ((obj->gtt_offset & ~I830_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1))) + printf( + "object 0x%08x not 512K or pot-size 0x%08x aligned\n", + obj->gtt_offset, size); - val = obj->gtt_offset; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I830_FENCE_SIZE_BITS(size); - val |= pitch_val << I830_FENCE_PITCH_SHIFT; - val |= I830_FENCE_REG_VALID; + pitch_val = obj->stride / 128; + pitch_val = ffs(pitch_val) - 1; - if (pipelined) { - int ret = intel_ring_begin(pipelined, 4); - if (ret) - return ret; - - intel_ring_emit(pipelined, MI_NOOP); - intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4); - intel_ring_emit(pipelined, val); - intel_ring_advance(pipelined); + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; + val |= I830_FENCE_SIZE_BITS(size); + val |= pitch_val << I830_FENCE_PITCH_SHIFT; + val |= I830_FENCE_REG_VALID; } else - I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); + val = 0; - return 0; + I915_WRITE(FENCE_REG_830_0 + reg * 4, val); + POSTING_READ(FENCE_REG_830_0 + reg * 4); } -static bool 
ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) +static void i915_gem_write_fence(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - return i915_seqno_passed(ring->get_seqno(ring), seqno); + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: sandybridge_write_fence_reg(dev, reg, obj); break; + case 5: + case 4: i965_write_fence_reg(dev, reg, obj); break; + case 3: i915_write_fence_reg(dev, reg, obj); break; + case 2: i830_write_fence_reg(dev, reg, obj); break; + default: break; + } } +static inline int fence_number(struct drm_i915_private *dev_priv, + struct drm_i915_fence_reg *fence) +{ + return fence - dev_priv->fence_regs; +} + +static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, + struct drm_i915_fence_reg *fence, + bool enable) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + int reg = fence_number(dev_priv, fence); + + i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); + + if (enable) { + obj->fence_reg = reg; + fence->obj = obj; + list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); + } else { + obj->fence_reg = I915_FENCE_REG_NONE; + fence->obj = NULL; + list_del_init(&fence->lru_list); + } +} + static int -i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) { int ret; if (obj->fenced_gpu_access) { if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->last_fenced_ring, 0, - obj->base.write_domain); + ret = i915_gem_flush_ring(obj->ring, + 0, obj->base.write_domain); if (ret) return ret; } obj->fenced_gpu_access = false; } - if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { - if (!ring_passed_seqno(obj->last_fenced_ring, - obj->last_fenced_seqno)) { - ret = i915_wait_request(obj->last_fenced_ring, - obj->last_fenced_seqno, - true); - if (ret) - return ret; - } + if (obj->last_fenced_seqno) { + ret = i915_wait_request(obj->ring, + obj->last_fenced_seqno); + if (ret) + return ret; obj->last_fenced_seqno = 0; - obj->last_fenced_ring = NULL; } /* Ensure that all CPU reads are completed before installing a fence * and all writes before removing the fence. 
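 * Once flushed, the new register value is installed through the per-gen
 * dispatcher above, roughly
 *
 *   i915_gem_write_fence(dev, reg, obj)
 *       gen 6/7 -> sandybridge_write_fence_reg()
 *       gen 4/5 -> i965_write_fence_reg()
 *       gen 3   -> i915_write_fence_reg()
 *       gen 2   -> i830_write_fence_reg()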
*/ if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) mb(); return 0; } int i915_gem_object_put_fence(struct drm_i915_gem_object *obj) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; int ret; - if (obj->tiling_mode) - i915_gem_release_mmap(obj); - - ret = i915_gem_object_flush_fence(obj, NULL); + ret = i915_gem_object_flush_fence(obj); if (ret) return ret; - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + if (obj->fence_reg == I915_FENCE_REG_NONE) + return 0; - if (dev_priv->fence_regs[obj->fence_reg].pin_count != 0) - printf("%s: pin_count %d\n", __func__, - dev_priv->fence_regs[obj->fence_reg].pin_count); - i915_gem_clear_fence_reg(obj->base.dev, - &dev_priv->fence_regs[obj->fence_reg]); + i915_gem_object_update_fence(obj, + &dev_priv->fence_regs[obj->fence_reg], + false); + i915_gem_object_fence_lost(obj); - obj->fence_reg = I915_FENCE_REG_NONE; - } - return 0; } static struct drm_i915_fence_reg * -i915_find_fence_reg(struct drm_device *dev, struct intel_ring_buffer *pipelined) +i915_find_fence_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_fence_reg *reg, *first, *avail; + struct drm_i915_fence_reg *reg, *avail; int i; /* First try to find a free reg */ avail = NULL; for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { reg = &dev_priv->fence_regs[i]; if (!reg->obj) return reg; if (!reg->pin_count) avail = reg; } if (avail == NULL) return NULL; /* None available, try to steal one or wait for a user to finish */ - avail = first = NULL; list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { if (reg->pin_count) continue; - if (first == NULL) - first = reg; - - if (!pipelined || - !reg->obj->last_fenced_ring || - reg->obj->last_fenced_ring == pipelined) { - avail = reg; - break; - } + return reg; } - if (avail == NULL) - avail = first; - - return avail; + return NULL; } int -i915_gem_object_get_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +i915_gem_object_get_fence(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + bool enable = obj->tiling_mode != I915_TILING_NONE; struct drm_i915_fence_reg *reg; int ret; - pipelined = NULL; + /* Have we updated the tiling parameters upon the object and so + * will need to serialise the write to the associated fence register? 
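+ * (Sketch of the assumed producer/consumer pairing: fence_dirty is set
+ * wherever the tiling parameters change, presumably the set-tiling path,
+ * and consumed here before the register is rewritten:
+ *
+ *   obj->fence_dirty = true;             tiling/stride changed elsewhere
+ *   ...
+ *   i915_gem_object_get_fence(obj);      flush old fence, then update
+ *
+ * The exact setter lies outside this hunk, so treat this as an assumption.)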
+ */ + if (obj->fence_dirty) { + ret = i915_gem_object_flush_fence(obj); + if (ret) + return ret; + } + ret = 0; if (obj->fence_reg != I915_FENCE_REG_NONE) { reg = &dev_priv->fence_regs[obj->fence_reg]; - list_move_tail(®->lru_list, &dev_priv->mm.fence_list); - - if (obj->tiling_changed) { - ret = i915_gem_object_flush_fence(obj, pipelined); - if (ret) - return ret; - - if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) - pipelined = NULL; - - if (pipelined) { - reg->setup_seqno = - i915_gem_next_request_seqno(pipelined); - obj->last_fenced_seqno = reg->setup_seqno; - obj->last_fenced_ring = pipelined; - } - - goto update; + if (!obj->fence_dirty) { + list_move_tail(®->lru_list, + &dev_priv->mm.fence_list); + return 0; } + } else if (enable) { + reg = i915_find_fence_reg(dev); + if (reg == NULL) + return -EDEADLK; - if (!pipelined) { - if (reg->setup_seqno) { - if (!ring_passed_seqno(obj->last_fenced_ring, - reg->setup_seqno)) { - ret = i915_wait_request( - obj->last_fenced_ring, - reg->setup_seqno, - true); - if (ret) - return ret; - } + if (reg->obj) { + struct drm_i915_gem_object *old = reg->obj; - reg->setup_seqno = 0; - } - } else if (obj->last_fenced_ring && - obj->last_fenced_ring != pipelined) { - ret = i915_gem_object_flush_fence(obj, pipelined); + ret = i915_gem_object_flush_fence(old); if (ret) return ret; - } - if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) - pipelined = NULL; - KASSERT(pipelined || reg->setup_seqno == 0, ("!pipelined")); - - if (obj->tiling_changed) { - if (pipelined) { - reg->setup_seqno = - i915_gem_next_request_seqno(pipelined); - obj->last_fenced_seqno = reg->setup_seqno; - obj->last_fenced_ring = pipelined; - } - goto update; + i915_gem_object_fence_lost(old); } - + } else return 0; - } - reg = i915_find_fence_reg(dev, pipelined); - if (reg == NULL) - return -EDEADLK; + i915_gem_object_update_fence(obj, reg, enable); + obj->fence_dirty = false; - ret = i915_gem_object_flush_fence(obj, pipelined); - if (ret) - return ret; - - if (reg->obj) { - struct drm_i915_gem_object *old = reg->obj; - - drm_gem_object_reference(&old->base); - - if (old->tiling_mode) - i915_gem_release_mmap(old); - - ret = i915_gem_object_flush_fence(old, pipelined); - if (ret) { - drm_gem_object_unreference(&old->base); - return ret; - } - - if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0) - pipelined = NULL; - - old->fence_reg = I915_FENCE_REG_NONE; - old->last_fenced_ring = pipelined; - old->last_fenced_seqno = - pipelined ? i915_gem_next_request_seqno(pipelined) : 0; - - drm_gem_object_unreference(&old->base); - } else if (obj->last_fenced_seqno == 0) - pipelined = NULL; - - reg->obj = obj; - list_move_tail(®->lru_list, &dev_priv->mm.fence_list); - obj->fence_reg = reg - dev_priv->fence_regs; - obj->last_fenced_ring = pipelined; - - reg->setup_seqno = - pipelined ? 
i915_gem_next_request_seqno(pipelined) : 0; - obj->last_fenced_seqno = reg->setup_seqno; - -update: - obj->tiling_changed = false; - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - ret = sandybridge_write_fence_reg(obj, pipelined); - break; - case 5: - case 4: - ret = i965_write_fence_reg(obj, pipelined); - break; - case 3: - ret = i915_write_fence_reg(obj, pipelined); - break; - case 2: - ret = i830_write_fence_reg(obj, pipelined); - break; - } - - return ret; + return 0; } -static void -i915_gem_clear_fence_reg(struct drm_device *dev, struct drm_i915_fence_reg *reg) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t fence_reg = reg - dev_priv->fence_regs; - - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); - break; - case 5: - case 4: - I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0); - break; - case 3: - if (fence_reg >= 8) - fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; - else - case 2: - fence_reg = FENCE_REG_830_0 + fence_reg * 4; - - I915_WRITE(fence_reg, 0); - break; - } - - list_del_init(®->lru_list); - reg->obj = NULL; - reg->setup_seqno = 0; - reg->pin_count = 0; -} - int i915_gem_init_object(struct drm_gem_object *obj) { printf("i915_gem_init_object called\n"); return (0); } static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return (obj->gtt_space && !obj->active && obj->pin_count == 0); + return !obj->active; } static void i915_gem_retire_task_handler(void *arg, int pending) { drm_i915_private_t *dev_priv; struct drm_device *dev; + struct intel_ring_buffer *ring; bool idle; int i; dev_priv = arg; dev = dev_priv->dev; /* Come back later if the device is busy... */ if (!sx_try_xlock(&dev->dev_struct_lock)) { taskqueue_enqueue_timeout(dev_priv->tq, &dev_priv->mm.retire_task, hz); return; } CTR0(KTR_DRM, "retire_task"); i915_gem_retire_requests(dev); /* Send a periodic flush down the ring so we don't hold onto GEM * objects indefinitely. 
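 * The per-ring step below boils down to (sketch): if the ring still has
 * dirty objects queued, flush them and queue an empty request so they
 * eventually retire,
 *
 *   i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
 *   i915_add_request(ring, NULL, request);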
*/ idle = true; - for (i = 0; i < I915_NUM_RINGS; i++) { + for_each_ring(ring, dev_priv, i) { struct intel_ring_buffer *ring = &dev_priv->rings[i]; if (!list_empty(&ring->gpu_write_list)) { struct drm_i915_gem_request *request; int ret; ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO); if (ret || request == NULL || i915_add_request(ring, NULL, request)) free(request, DRM_I915_GEM); } idle &= list_empty(&ring->request_list); } if (!dev_priv->mm.suspended && !idle) taskqueue_enqueue_timeout(dev_priv->tq, &dev_priv->mm.retire_task, hz); DRM_UNLOCK(dev); } void i915_gem_lastclose(struct drm_device *dev) { int ret; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; ret = i915_gem_idle(dev); if (ret != 0) DRM_ERROR("failed to idle hardware: %d\n", ret); } static int i915_gem_init_phys_object(struct drm_device *dev, int id, int size, int align) { drm_i915_private_t *dev_priv; struct drm_i915_gem_phys_object *phys_obj; int ret; dev_priv = dev->dev_private; if (dev_priv->mm.phys_objs[id - 1] != NULL || size == 0) return (0); phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object), DRM_I915_GEM, M_WAITOK | M_ZERO); phys_obj->id = id; phys_obj->handle = drm_pci_alloc(dev, size, align, ~0); if (phys_obj->handle == NULL) { ret = -ENOMEM; goto free_obj; } pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr, size / PAGE_SIZE, PAT_WRITE_COMBINING); dev_priv->mm.phys_objs[id - 1] = phys_obj; return (0); free_obj: free(phys_obj, DRM_I915_GEM); return (ret); } static void i915_gem_free_phys_object(struct drm_device *dev, int id) { drm_i915_private_t *dev_priv; struct drm_i915_gem_phys_object *phys_obj; dev_priv = dev->dev_private; if (dev_priv->mm.phys_objs[id - 1] == NULL) return; phys_obj = dev_priv->mm.phys_objs[id - 1]; if (phys_obj->cur_obj != NULL) i915_gem_detach_phys_object(dev, phys_obj->cur_obj); drm_pci_free(dev, phys_obj->handle); free(phys_obj, DRM_I915_GEM); dev_priv->mm.phys_objs[id - 1] = NULL; } void i915_gem_free_all_phys_object(struct drm_device *dev) { int i; for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) i915_gem_free_phys_object(dev, i); } void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj) { vm_page_t m; struct sf_buf *sf; char *vaddr, *dst; int i, page_count; if (obj->phys_obj == NULL) return; vaddr = obj->phys_obj->handle->vaddr; page_count = obj->base.size / PAGE_SIZE; VM_OBJECT_WLOCK(obj->base.vm_obj); for (i = 0; i < page_count; i++) { - m = i915_gem_wire_page(obj->base.vm_obj, i); + m = i915_gem_wire_page(obj->base.vm_obj, i, NULL); if (m == NULL) continue; /* XXX */ VM_OBJECT_WUNLOCK(obj->base.vm_obj); sf = sf_buf_alloc(m, 0); if (sf != NULL) { dst = (char *)sf_buf_kva(sf); memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE); sf_buf_free(sf); } drm_clflush_pages(&m, 1); VM_OBJECT_WLOCK(obj->base.vm_obj); vm_page_reference(m); vm_page_lock(m); vm_page_dirty(m); vm_page_unwire(m, PQ_INACTIVE); vm_page_unlock(m); atomic_add_long(&i915_gem_wired_pages_cnt, -1); } VM_OBJECT_WUNLOCK(obj->base.vm_obj); intel_gtt_chipset_flush(); obj->phys_obj->cur_obj = NULL; obj->phys_obj = NULL; } int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, int id, int align) { drm_i915_private_t *dev_priv; vm_page_t m; struct sf_buf *sf; char *dst, *src; int i, page_count, ret; if (id > I915_MAX_PHYS_OBJECT) return (-EINVAL); if (obj->phys_obj != NULL) { if (obj->phys_obj->id == id) return (0); i915_gem_detach_phys_object(dev, obj); } 
dev_priv = dev->dev_private; if (dev_priv->mm.phys_objs[id - 1] == NULL) { ret = i915_gem_init_phys_object(dev, id, obj->base.size, align); if (ret != 0) { DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->base.size); return (ret); } } /* bind to the object */ obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; obj->phys_obj->cur_obj = obj; page_count = obj->base.size / PAGE_SIZE; VM_OBJECT_WLOCK(obj->base.vm_obj); ret = 0; for (i = 0; i < page_count; i++) { - m = i915_gem_wire_page(obj->base.vm_obj, i); + m = i915_gem_wire_page(obj->base.vm_obj, i, NULL); if (m == NULL) { ret = -EIO; break; } VM_OBJECT_WUNLOCK(obj->base.vm_obj); sf = sf_buf_alloc(m, 0); src = (char *)sf_buf_kva(sf); dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i); memcpy(dst, src, PAGE_SIZE); sf_buf_free(sf); VM_OBJECT_WLOCK(obj->base.vm_obj); vm_page_reference(m); vm_page_lock(m); vm_page_unwire(m, PQ_INACTIVE); vm_page_unlock(m); atomic_add_long(&i915_gem_wired_pages_cnt, -1); } VM_OBJECT_WUNLOCK(obj->base.vm_obj); return (0); } static int -i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, - uint64_t data_ptr, uint64_t offset, uint64_t size, - struct drm_file *file_priv) -{ - char *user_data, *vaddr; - int ret; - - vaddr = (char *)obj->phys_obj->handle->vaddr + offset; - user_data = (char *)(uintptr_t)data_ptr; - - if (copyin_nofault(user_data, vaddr, size) != 0) { - /* The physical object once assigned is fixed for the lifetime - * of the obj, so we can safely drop the lock and continue - * to access vaddr. - */ - DRM_UNLOCK(dev); - ret = -copyin(user_data, vaddr, size); - DRM_LOCK(dev); - if (ret != 0) - return (ret); - } - - intel_gtt_chipset_flush(); - return (0); -} - -static int i915_gpu_is_active(struct drm_device *dev) { drm_i915_private_t *dev_priv; dev_priv = dev->dev_private; return (!list_empty(&dev_priv->mm.flushing_list) || !list_empty(&dev_priv->mm.active_list)); } static void i915_gem_lowmem(void *arg) { struct drm_device *dev; struct drm_i915_private *dev_priv; struct drm_i915_gem_object *obj, *next; int cnt, cnt_fail, cnt_total; dev = arg; dev_priv = dev->dev_private; if (!sx_try_xlock(&dev->dev_struct_lock)) return; CTR0(KTR_DRM, "gem_lowmem"); rescan: /* first scan for clean buffers */ i915_gem_retire_requests(dev); cnt_total = cnt_fail = cnt = 0; list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) { if (i915_gem_object_is_purgeable(obj)) { if (i915_gem_object_unbind(obj) != 0) cnt_total++; } else cnt_total++; } /* second pass, evict/count anything still on the inactive list */ list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) { if (i915_gem_object_unbind(obj) == 0) cnt++; else cnt_fail++; } if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) { /* * We are desperate for pages, so as a last resort, wait * for the GPU to finish and discard whatever we can. * This has a dramatic impact to reduce the number of * OOM-killer events whilst running the GPU aggressively. 
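 * In other words the last-resort step below is simply
 *
 *   if (i915_gpu_idle(dev) == 0)
 *           goto rescan;
 *
 * blocking until every ring is idle before the eviction passes above are
 * retried.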
*/ - if (i915_gpu_idle(dev, true) == 0) + if (i915_gpu_idle(dev) == 0) goto rescan; } DRM_UNLOCK(dev); } void i915_gem_unload(struct drm_device *dev) { struct drm_i915_private *dev_priv; dev_priv = dev->dev_private; EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem); } Index: head/sys/dev/drm2/i915/i915_gem_context.c =================================================================== --- head/sys/dev/drm2/i915/i915_gem_context.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_gem_context.c (revision 277487) @@ -1,549 +1,549 @@ /* * Copyright © 2011-2012 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Ben Widawsky * */ /* * This file implements HW context support. On gen5+ a HW context consists of an * opaque GPU object which is referenced at times of context saves and restores. * With RC6 enabled, the context is also referenced as the GPU enters and exists * from RC6 (GPU has it's own internal power context, except on gen5). Though * something like a context does exist for the media ring, the code only * supports contexts for the render ring. * * In software, there is a distinction between contexts created by the user, * and the default HW context. The default HW context is used by GPU clients * that do not request setup of their own hardware context. The default * context's state is never restored to help prevent programming errors. This * would happen if a client ran and piggy-backed off another clients GPU state. * The default context only exists to give the GPU some offset to load as the * current to invoke a save of the context we actually care about. In fact, the * code could likely be constructed, albeit in a more complicated fashion, to * never use the default context, though that limits the driver's ability to * swap out, and/or destroy other contexts. * * All other contexts are created as a request by the GPU client. These contexts * store GPU state, and thus allow GPU clients to not re-emit state (and * potentially query certain state) at any time. The kernel driver makes * certain that the appropriate commands are inserted. * * The context life cycle is semi-complicated in that context BOs may live * longer than the context itself because of the way the hardware, and object * tracking works. Below is a very crude representation of the state machine * describing the context life. 
* refcount pincount active * S0: initial state 0 0 0 * S1: context created 1 0 0 * S2: context is currently running 2 1 X * S3: GPU referenced, but not current 2 0 1 * S4: context is current, but destroyed 1 1 0 * S5: like S3, but destroyed 1 0 1 * * The most common (but not all) transitions: * S0->S1: client creates a context * S1->S2: client submits execbuf with context * S2->S3: other clients submits execbuf with context * S3->S1: context object was retired * S3->S2: clients submits another execbuf * S2->S4: context destroy called with current context * S3->S5->S0: destroy path * S4->S5->S0: destroy path on current context * * There are two confusing terms used above: * The "current context" means the context which is currently running on the * GPU. The GPU has loaded it's state already and has stored away the gtt * offset of the BO. The GPU is not actively referencing the data at this * offset, but it will on the next context switch. The only way to avoid this * is to do a GPU reset. * * An "active context' is one which was previously the "current context" and is * on the active list waiting for the next context switch to occur. Until this * happens, the object must remain at the same gtt offset. It is therefore * possible to destroy a context, but it is still active. * */ #include __FBSDID("$FreeBSD$"); #include #include #include "i915_drv.h" /* This is a HW constraint. The value below is the largest known requirement * I've seen in a spec to date, and that was a workaround for a non-shipping * part. It should be safe to decrease this, but it's more future proof as is. */ #define CONTEXT_ALIGN (64<<10) static struct i915_hw_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); static int do_switch(struct i915_hw_context *to); static int get_context_size(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; u32 reg; switch (INTEL_INFO(dev)->gen) { case 6: reg = I915_READ(CXT_SIZE); ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; break; case 7: reg = I915_READ(GEN7_CXT_SIZE); #ifdef FREEBSD_WIP if (IS_HASWELL(dev)) ret = HSW_CXT_TOTAL_SIZE(reg) * 64; else #endif ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; break; default: panic("i915_gem_context: Unsupported Intel GPU generation %d", INTEL_INFO(dev)->gen); } return ret; } static void do_destroy(struct i915_hw_context *ctx) { #if defined(INVARIANTS) struct drm_device *dev = ctx->obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; #endif if (ctx->file_priv) drm_gem_names_remove(&ctx->file_priv->context_idr, ctx->id); else KASSERT(ctx == dev_priv->rings[RCS].default_context, ("i915_gem_context: ctx != default_context")); drm_gem_object_unreference(&ctx->obj->base); free(ctx, DRM_I915_GEM); } static int create_hw_context(struct drm_device *dev, struct drm_i915_file_private *file_priv, struct i915_hw_context **ret_ctx) { struct drm_i915_private *dev_priv = dev->dev_private; struct i915_hw_context *ctx; int ret, id; ctx = malloc(sizeof(*ctx), DRM_I915_GEM, M_NOWAIT | M_ZERO); if (ctx == NULL) return (-ENOMEM); ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); if (ctx->obj == NULL) { free(ctx, DRM_I915_GEM); DRM_DEBUG_DRIVER("Context object allocated failed\n"); return (-ENOMEM); } if (INTEL_INFO(dev)->gen >= 7) { ret = i915_gem_object_set_cache_level(ctx->obj, I915_CACHE_LLC_MLC); if (ret) goto err_out; } /* The ring associated with the context object is handled by the normal * object tracking code. 
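 *
 * (Aside, illustrative rather than part of this change: the user-visible
 * context id assigned further down comes from the per-file context_idr name
 * table; drm_gem_name_create() can transiently return -EAGAIN, so the
 * allocation is simply retried, roughly
 *
 *	do {
 *		id = 0;
 *		ret = drm_gem_name_create(&file_priv->context_idr, ctx, &id);
 *	} while (ret == -EAGAIN);
 *
 * and any other error tears the half-built context back down.)
 *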
We give an initial ring value simple to pass an * assertion in the context switch code. */ ctx->ring = &dev_priv->rings[RCS]; /* Default context will never have a file_priv */ if (file_priv == NULL) { *ret_ctx = ctx; return (0); } ctx->file_priv = file_priv; again: id = 0; ret = drm_gem_name_create(&file_priv->context_idr, ctx, &id); if (ret == 0) ctx->id = id; if (ret == -EAGAIN) goto again; else if (ret) goto err_out; *ret_ctx = ctx; return (0); err_out: do_destroy(ctx); return (ret); } static inline bool is_default_context(struct i915_hw_context *ctx) { return (ctx == ctx->ring->default_context); } /** * The default context needs to exist per ring that uses contexts. It stores the * context state of the GPU for applications that don't utilize HW contexts, as * well as an idle case. */ static int create_default_context(struct drm_i915_private *dev_priv) { struct i915_hw_context *ctx; int ret; DRM_LOCK_ASSERT(dev_priv->dev); ret = create_hw_context(dev_priv->dev, NULL, &ctx); if (ret != 0) return (ret); /* We may need to do things with the shrinker which require us to * immediately switch back to the default context. This can cause a * problem as pinning the default context also requires GTT space which * may not be available. To avoid this we always pin the * default context. */ dev_priv->rings[RCS].default_context = ctx; ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); if (ret) goto err_destroy; ret = do_switch(ctx); if (ret) goto err_unpin; DRM_DEBUG_DRIVER("Default HW context loaded\n"); return 0; err_unpin: i915_gem_object_unpin(ctx->obj); err_destroy: do_destroy(ctx); return ret; } void i915_gem_context_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ctx_size; if (!HAS_HW_CONTEXTS(dev)) { dev_priv->hw_contexts_disabled = true; return; } /* If called from reset, or thaw... we've been here already */ if (dev_priv->hw_contexts_disabled || dev_priv->rings[RCS].default_context) return; ctx_size = get_context_size(dev); dev_priv->hw_context_size = get_context_size(dev); dev_priv->hw_context_size = roundup(dev_priv->hw_context_size, 4096); if (ctx_size <= 0 || ctx_size > (1<<20)) { dev_priv->hw_contexts_disabled = true; return; } if (create_default_context(dev_priv)) { dev_priv->hw_contexts_disabled = true; return; } DRM_DEBUG_DRIVER("HW context support initialized\n"); } void i915_gem_context_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->hw_contexts_disabled) return; /* The only known way to stop the gpu from accessing the hw context is * to reset it. Do this as the very last operation to avoid confusing * other code, leading to spurious errors. */ intel_gpu_reset(dev); i915_gem_object_unpin(dev_priv->rings[RCS].default_context->obj); do_destroy(dev_priv->rings[RCS].default_context); } static int context_idr_cleanup(uint32_t id, void *p, void *data) { struct i915_hw_context *ctx = p; KASSERT(id != DEFAULT_CONTEXT_ID, ("i915_gem_context: id == DEFAULT_CONTEXT_ID in cleanup")); do_destroy(ctx); return 0; } void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; //DRM_LOCK(dev); /* Called from preclose(), the lock is already owned. 
*/ drm_gem_names_foreach(&file_priv->context_idr, context_idr_cleanup, NULL); drm_gem_names_fini(&file_priv->context_idr); //DRM_UNLOCK(dev); } static struct i915_hw_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) { return (struct i915_hw_context *)drm_gem_find_ptr(&file_priv->context_idr, id); } static inline int mi_set_context(struct intel_ring_buffer *ring, struct i915_hw_context *new_context, u32 hw_flags) { int ret; /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value * explicitly, so we rely on the value at ring init, stored in * itlb_before_ctx_switch. */ if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) { ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0); if (ret) return ret; } ret = intel_ring_begin(ring, 6); if (ret) return ret; if (IS_GEN7(ring->dev)) intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); else intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); intel_ring_emit(ring, new_context->obj->gtt_offset | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | hw_flags); /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */ intel_ring_emit(ring, MI_NOOP); if (IS_GEN7(ring->dev)) intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); else intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return ret; } static int do_switch(struct i915_hw_context *to) { struct intel_ring_buffer *ring = to->ring; struct drm_i915_gem_object *from_obj = ring->last_context_obj; u32 hw_flags = 0; int ret; KASSERT(!(from_obj != NULL && from_obj->pin_count == 0), ("i915_gem_context: invalid \"from\" context")); if (from_obj == to->obj) return 0; ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); if (ret) return ret; /* Clear this page out of any CPU caches for coherent swap-in/out. Note * that thanks to write = false in this call and us not setting any gpu * write domains when putting a context object onto the active list * (when switching away from it), this won't block. * XXX: We need a real interface to do this instead of trickery. */ ret = i915_gem_object_set_to_gtt_domain(to->obj, false); if (ret) { i915_gem_object_unpin(to->obj); return ret; } if (!to->obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(to->obj); + i915_gem_gtt_bind_object(to->obj, to->obj->cache_level); if (!to->is_initialized || is_default_context(to)) hw_flags |= MI_RESTORE_INHIBIT; else if (from_obj == to->obj) /* not yet expected */ hw_flags |= MI_FORCE_RESTORE; ret = mi_set_context(ring, to, hw_flags); if (ret) { i915_gem_object_unpin(to->obj); return ret; } /* The backing object for the context is done after switching to the * *next* context. Therefore we cannot retire the previous context until * the next context has already started running. In fact, the below code * is a bit suboptimal because the retiring can occur simply after the * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from_obj != NULL) { from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; i915_gem_object_move_to_active(from_obj, ring, i915_gem_next_request_seqno(ring)); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the * object dirty. The only exception is that the context must be * correct in case the object gets swapped out. 
Ideally we'd be * able to defer doing this until we know the object would be * swapped, but there is no way to do that yet. */ from_obj->dirty = 1; KASSERT(from_obj->ring == ring, ("i915_gem_context: from_ring != ring")); i915_gem_object_unpin(from_obj); drm_gem_object_unreference(&from_obj->base); } drm_gem_object_reference(&to->obj->base); ring->last_context_obj = to->obj; to->is_initialized = true; return 0; } /** * i915_switch_context() - perform a GPU context switch. * @ring: ring for which we'll execute the context switch * @file_priv: file_priv associated with the context, may be NULL * @id: context id number * @seqno: sequence number by which the new context will be switched to * @flags: * * The context life cycle is simple. The context refcount is incremented and * decremented by 1 and create and destroy. If the context is in use by the GPU, * it will have a refoucnt > 1. This allows us to destroy the context abstract * object while letting the normal object tracking destroy the backing BO. */ int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, int to_id) { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct i915_hw_context *to; if (dev_priv->hw_contexts_disabled) return 0; if (ring != &dev_priv->rings[RCS]) return 0; if (to_id == DEFAULT_CONTEXT_ID) { to = ring->default_context; } else { if (file == NULL) return -EINVAL; to = i915_gem_context_get(file->driver_priv, to_id); if (to == NULL) return -ENOENT; } return do_switch(to); } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_context_create *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_hw_context *ctx; int ret; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; if (dev_priv->hw_contexts_disabled) return -ENODEV; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; ret = create_hw_context(dev, file_priv, &ctx); DRM_UNLOCK(dev); if (ret != 0) return (ret); args->ctx_id = ctx->id; DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); return 0; } int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_context_destroy *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_hw_context *ctx; int ret; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; ctx = i915_gem_context_get(file_priv, args->ctx_id); if (!ctx) { DRM_UNLOCK(dev); return -ENOENT; } do_destroy(ctx); DRM_UNLOCK(dev); DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); return 0; } Index: head/sys/dev/drm2/i915/i915_gem_evict.c =================================================================== --- head/sys/dev/drm2/i915/i915_gem_evict.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_gem_evict.c (revision 277487) @@ -1,213 +1,207 @@ /* * Copyright © 2008-2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission 
notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Eric Anholt * Chris Wilson * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include static bool mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) { + if (obj->pin_count) + return false; + list_add(&obj->exec_list, unwind); return drm_mm_scan_add_block(obj->gtt_space); } int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, bool mappable) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct drm_i915_gem_object *obj; int ret = 0; CTR4(KTR_DRM, "evict_something %p %d %u %d", dev, min_size, alignment, mappable); /* * The goal is to evict objects and amalgamate space in LRU order. * The oldest idle objects reside on the inactive list, which is in * retirement order. The next objects to retire are those on the (per * ring) active list that do not have an outstanding flush. Once the * hardware reports completion (the seqno is updated after the * batchbuffer has been finished) the clean buffer objects would * be retired to the inactive list. Any dirty objects would be added * to the tail of the flushing list. So after processing the clean * active objects we need to emit a MI_FLUSH to retire the flushing * list, hence the retirement order of the flushing list is in * advance of the dirty objects on the active lists. * * The retirement sequence is thus: * 1. Inactive objects (already retired) * 2. Clean active objects * 3. Flushing list * 4. Dirty active objects. * * On each list, the oldest objects lie at the HEAD with the freshest * object on the TAIL. */ INIT_LIST_HEAD(&unwind_list); if (mappable) drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size, alignment, 0, dev_priv->mm.gtt_mappable_end); else drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); /* First see if there is a large enough contiguous idle region... */ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { if (mark_free(obj, &unwind_list)) goto found; } /* Now merge in the soon-to-be-expired objects... */ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { /* Does the object require an outstanding flush? */ - if (obj->base.write_domain || obj->pin_count) + if (obj->base.write_domain) continue; if (mark_free(obj, &unwind_list)) goto found; } /* Finally add anything with a pending flush (in order of retirement) */ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { - if (obj->pin_count) - continue; - if (mark_free(obj, &unwind_list)) goto found; } list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { - if (!obj->base.write_domain || obj->pin_count) + if (!obj->base.write_domain) continue; if (mark_free(obj, &unwind_list)) goto found; } /* Nothing found, clean up and bail out! 
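 *
 * (Background, sketched for illustration only: every gtt_space node handed to
 * drm_mm_scan_add_block() above has to be handed back to
 * drm_mm_scan_remove_block() before the memory manager is touched again, in
 * the reverse order of addition; mark_free() inserts at the head of
 * unwind_list and the loops here pop from the head, which preserves that
 * order.  In outline:
 *
 *	drm_mm_init_scan(mm, min_size, alignment);
 *	walk candidates in LRU order, stopping once drm_mm_scan_add_block()
 *	    returns true (a suitable hole exists);
 *	walk unwind_list from the head: nodes for which
 *	    drm_mm_scan_remove_block() returns true make up the hole and are
 *	    the objects that actually need to be evicted.
 *
 * On this "nothing found" path every remove is expected to return false,
 * hence the KASSERT below.)
 *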
*/ while (!list_empty(&unwind_list)) { obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); ret = drm_mm_scan_remove_block(obj->gtt_space); KASSERT(ret == 0, ("drm_mm_scan_remove_block failed %d", ret)); list_del_init(&obj->exec_list); } /* We expect the caller to unpin, evict all and try again, or give up. * So calling i915_gem_evict_everything() is unnecessary. */ return -ENOSPC; found: /* drm_mm doesn't allow any other other operations while * scanning, therefore store to be evicted objects on a * temporary list. */ INIT_LIST_HEAD(&eviction_list); while (!list_empty(&unwind_list)) { obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, exec_list); if (drm_mm_scan_remove_block(obj->gtt_space)) { list_move(&obj->exec_list, &eviction_list); drm_gem_object_reference(&obj->base); continue; } list_del_init(&obj->exec_list); } /* Unbinding will emit any required flushes */ while (!list_empty(&eviction_list)) { obj = list_first_entry(&eviction_list, struct drm_i915_gem_object, exec_list); if (ret == 0) ret = i915_gem_object_unbind(obj); list_del_init(&obj->exec_list); drm_gem_object_unreference(&obj->base); } return ret; } int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) { drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + struct drm_i915_gem_object *obj, *next; bool lists_empty; + int ret; lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->mm.active_list)); if (lists_empty) return -ENOSPC; CTR2(KTR_DRM, "evict_everything %p %d", dev, purgeable_only); - /* Flush everything (on to the inactive lists) and evict */ - ret = i915_gpu_idle(dev, true); + /* The gpu_idle will flush everything in the write domain to the + * active list. Then we must move everything off the active list + * with retire requests. + */ + ret = i915_gpu_idle(dev); if (ret) return ret; + i915_gem_retire_requests(dev); + KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flush list not empty")); - return i915_gem_evict_inactive(dev, purgeable_only); -} - -/** Unbinds all inactive objects. 
*/ -int -i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj, *next; - - CTR2(KTR_DRM, "evict_inactive %p %d", dev, purgeable_only); - + /* Having flushed everything, unbind() should never raise an error */ list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) { if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { - int ret = i915_gem_object_unbind(obj); - if (ret) - return ret; + if (obj->pin_count == 0) + i915_gem_object_unbind(obj); } } return 0; } Index: head/sys/dev/drm2/i915/i915_gem_execbuffer.c =================================================================== --- head/sys/dev/drm2/i915/i915_gem_execbuffer.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_gem_execbuffer.c (revision 277487) @@ -1,1544 +1,1539 @@ /* * Copyright © 2008,2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Eric Anholt * Chris Wilson * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include struct change_domains { uint32_t invalidate_domains; uint32_t flush_domains; uint32_t flush_rings; uint32_t flips; }; /* * Set the next domain for the specified object. This * may not actually perform the necessary flushing/invaliding though, * as that may want to be batched with other set_domain operations * * This is (we hope) the only really tricky part of gem. The goal * is fairly simple -- track which caches hold bits of the object * and make sure they remain coherent. A few concrete examples may * help to explain how it works. For shorthand, we use the notation * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the * a pair of read and write domain masks. * * Case 1: the batch buffer * * 1. Allocated * 2. Written by CPU * 3. Mapped to GTT * 4. Read by GPU * 5. Unmapped from GTT * 6. Freed * * Let's take these a step at a time * * 1. Allocated * Pages allocated from the kernel may still have * cache contents, so we set them to (CPU, CPU) always. * 2. Written by CPU (using pwrite) * The pwrite function calls set_domain (CPU, CPU) and * this function does nothing (as nothing changes) * 3. Mapped by GTT * This function asserts that the object is not * currently in any GPU-based read or write domains * 4. Read by GPU * i915_gem_execbuffer calls set_domain (COMMAND, 0). * As write_domain is zero, this function adds in the * current read domains (CPU+COMMAND, 0). 
* flush_domains is set to CPU. * invalidate_domains is set to COMMAND * clflush is run to get data out of the CPU caches * then i915_dev_set_domain calls i915_gem_flush to * emit an MI_FLUSH and drm_agp_chipset_flush * 5. Unmapped from GTT * i915_gem_object_unbind calls set_domain (CPU, CPU) * flush_domains and invalidate_domains end up both zero * so no flushing/invalidating happens * 6. Freed * yay, done * * Case 2: The shared render buffer * * 1. Allocated * 2. Mapped to GTT * 3. Read/written by GPU * 4. set_domain to (CPU,CPU) * 5. Read/written by CPU * 6. Read/written by GPU * * 1. Allocated * Same as last example, (CPU, CPU) * 2. Mapped to GTT * Nothing changes (assertions find that it is not in the GPU) * 3. Read/written by GPU * execbuffer calls set_domain (RENDER, RENDER) * flush_domains gets CPU * invalidate_domains gets GPU * clflush (obj) * MI_FLUSH and drm_agp_chipset_flush * 4. set_domain (CPU, CPU) * flush_domains gets GPU * invalidate_domains gets CPU * wait_rendering (obj) to make sure all drawing is complete. * This will include an MI_FLUSH to get the data from GPU * to memory * clflush (obj) to invalidate the CPU cache * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) * 5. Read/written by CPU * cache lines are loaded and dirtied * 6. Read written by GPU * Same as last GPU access * * Case 3: The constant buffer * * 1. Allocated * 2. Written by CPU * 3. Read by GPU * 4. Updated (written) by CPU again * 5. Read by GPU * * 1. Allocated * (CPU, CPU) * 2. Written by CPU * (CPU, CPU) * 3. Read by GPU * (CPU+RENDER, 0) * flush_domains = CPU * invalidate_domains = RENDER * clflush (obj) * MI_FLUSH * drm_agp_chipset_flush * 4. Updated (written) by CPU again * (CPU, CPU) * flush_domains = 0 (no previous write domain) * invalidate_domains = 0 (no new read domains) * 5. Read by GPU * (CPU+RENDER, 0) * flush_domains = CPU * invalidate_domains = RENDER * clflush (obj) * MI_FLUSH * drm_agp_chipset_flush */ static void i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, struct change_domains *cd) { uint32_t invalidate_domains = 0, flush_domains = 0; /* * If the object isn't moving to a new write domain, * let the object stay in multiple read domains */ if (obj->base.pending_write_domain == 0) obj->base.pending_read_domains |= obj->base.read_domains; /* * Flush the current write domain if * the new read domains don't match. Invalidate * any read domains which differ from the old * write domain */ if (obj->base.write_domain && (((obj->base.write_domain != obj->base.pending_read_domains || obj->ring != ring)) || (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { flush_domains |= obj->base.write_domain; invalidate_domains |= obj->base.pending_read_domains & ~obj->base.write_domain; } /* * Invalidate any read caches which may have * stale data. That is, any new read domains. */ invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) i915_gem_clflush_object(obj); if (obj->base.pending_write_domain) cd->flips |= atomic_load_acq_int(&obj->pending_flip); /* The actual obj->write_domain will be updated with * pending_write_domain after we emit the accumulated flush for all * of our domain changes in execbuffers (which clears objects' * write_domains). So if we have a current write domain that we * aren't changing, set pending_write_domain to that. 
*/ if (flush_domains == 0 && obj->base.pending_write_domain == 0) obj->base.pending_write_domain = obj->base.write_domain; cd->invalidate_domains |= invalidate_domains; cd->flush_domains |= flush_domains; if (flush_domains & I915_GEM_GPU_DOMAINS) cd->flush_rings |= intel_ring_flag(obj->ring); if (invalidate_domains & I915_GEM_GPU_DOMAINS) cd->flush_rings |= intel_ring_flag(ring); } struct eb_objects { u_long hashmask; LIST_HEAD(, drm_i915_gem_object) *buckets; }; static struct eb_objects * eb_create(int size) { struct eb_objects *eb; eb = malloc(sizeof(*eb), DRM_I915_GEM, M_WAITOK | M_ZERO); eb->buckets = hashinit(size, DRM_I915_GEM, &eb->hashmask); return (eb); } static void eb_reset(struct eb_objects *eb) { int i; for (i = 0; i <= eb->hashmask; i++) LIST_INIT(&eb->buckets[i]); } static void eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) { LIST_INSERT_HEAD(&eb->buckets[obj->exec_handle & eb->hashmask], obj, exec_node); } static struct drm_i915_gem_object * eb_get_object(struct eb_objects *eb, unsigned long handle) { struct drm_i915_gem_object *obj; LIST_FOREACH(obj, &eb->buckets[handle & eb->hashmask], exec_node) { if (obj->exec_handle == handle) return (obj); } return (NULL); } static void eb_destroy(struct eb_objects *eb) { free(eb->buckets, DRM_I915_GEM); free(eb, DRM_I915_GEM); } +static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) +{ + return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || + obj->cache_level != I915_CACHE_NONE); +} + static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_objects *eb, struct drm_i915_gem_relocation_entry *reloc) { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; + struct drm_i915_gem_object *target_i915_obj; uint32_t target_offset; int ret = -EINVAL; /* we've already hold a reference to all valid objects */ target_obj = &eb_get_object(eb, reloc->target_handle)->base; if (unlikely(target_obj == NULL)) return -ENOENT; - target_offset = to_intel_bo(target_obj)->gtt_offset; + target_i915_obj = to_intel_bo(target_obj); + target_offset = target_i915_obj->gtt_offset; #if WATCH_RELOC DRM_INFO("%s: obj %p offset %08x target %d " "read %08x write %08x gtt %08x " "presumed %08x delta %08x\n", __func__, obj, (int) reloc->offset, (int) reloc->target_handle, (int) reloc->read_domains, (int) reloc->write_domain, (int) target_offset, (int) reloc->presumed_offset, reloc->delta); #endif /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. 
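 *
 * (For orientation, not part of this change: a relocation ultimately just
 * patches one 32-bit word of this object with the target's final GTT
 * address, conceptually
 *
 *	*(uint32_t *)(obj_contents + reloc->offset) =
 *	    target_offset + reloc->delta;
 *
 * where obj_contents stands in for however the object's memory is reached.
 * The two branches below differ only in how that word is written:
 * use_cpu_reloc() goes through an sf_buf mapping of the backing page,
 * otherwise the word is written through a write-combining mapping of the
 * GTT aperture.)
 *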
*/ if (unlikely(target_offset == 0)) { DRM_DEBUG("No GTT space found for object %d\n", reloc->target_handle); return ret; } /* Validate that the target is in a valid r/w GPU domain */ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { DRM_DEBUG("reloc with multiple write domains: " "obj %p target %d offset %d " "read %08x write %08x", obj, reloc->target_handle, (int) reloc->offset, reloc->read_domains, reloc->write_domain); return ret; } if (unlikely((reloc->write_domain | reloc->read_domains) & ~I915_GEM_GPU_DOMAINS)) { DRM_DEBUG("reloc with read/write non-GPU domains: " "obj %p target %d offset %d " "read %08x write %08x", obj, reloc->target_handle, (int) reloc->offset, reloc->read_domains, reloc->write_domain); return ret; } if (unlikely(reloc->write_domain && target_obj->pending_write_domain && reloc->write_domain != target_obj->pending_write_domain)) { DRM_DEBUG("Write domain conflict: " "obj %p target %d offset %d " "new %08x old %08x\n", obj, reloc->target_handle, (int) reloc->offset, reloc->write_domain, target_obj->pending_write_domain); return ret; } target_obj->pending_read_domains |= reloc->read_domains; target_obj->pending_write_domain |= reloc->write_domain; /* If the relocation already has the right value in it, no * more work needs to be done. */ if (target_offset == reloc->presumed_offset) return 0; /* Check that the relocation address is valid... */ if (unlikely(reloc->offset > obj->base.size - 4)) { DRM_DEBUG("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", obj, reloc->target_handle, (int) reloc->offset, (int) obj->base.size); return ret; } if (unlikely(reloc->offset & 3)) { DRM_DEBUG("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", obj, reloc->target_handle, (int) reloc->offset); return ret; } + /* We can't wait for rendering with pagefaults disabled */ + if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0) + return (-EFAULT); + reloc->delta += target_offset; - if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { + if (use_cpu_reloc(obj)) { uint32_t page_offset = reloc->offset & PAGE_MASK; char *vaddr; struct sf_buf *sf; + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + return ret; + sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)], SFB_NOWAIT); if (sf == NULL) return (-ENOMEM); vaddr = (void *)sf_buf_kva(sf); *(uint32_t *)(vaddr + page_offset) = reloc->delta; sf_buf_free(sf); } else { uint32_t *reloc_entry; char *reloc_page; - /* We can't wait for rendering with pagefaults disabled */ - if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0) - return (-EFAULT); - ret = i915_gem_object_set_to_gtt_domain(obj, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) return ret; + ret = i915_gem_object_put_fence(obj); + if (ret) + return ret; + /* * Map the page containing the relocation we're going * to perform. */ reloc->offset += obj->gtt_offset; reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset & ~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING); reloc_entry = (uint32_t *)(reloc_page + (reloc->offset & PAGE_MASK)); *(volatile uint32_t *)reloc_entry = reloc->delta; pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE); } + /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and + * pipe_control writes because the gpu doesn't properly redirect them + * through the ppgtt for non_secure batchbuffers. 
*/ + if (unlikely(IS_GEN6(dev) && + reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && + !target_i915_obj->has_global_gtt_mapping)) { + i915_gem_gtt_bind_object(target_i915_obj, + target_i915_obj->cache_level); + } + /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; return 0; } static int i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, struct eb_objects *eb) { +#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) + struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; struct drm_i915_gem_relocation_entry *user_relocs; struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; - struct drm_i915_gem_relocation_entry reloc; - int i, ret; + int remain, ret; user_relocs = (void *)(uintptr_t)entry->relocs_ptr; - for (i = 0; i < entry->relocation_count; i++) { - ret = -copyin_nofault(user_relocs + i, &reloc, sizeof(reloc)); - if (ret != 0) - return (ret); + remain = entry->relocation_count; + while (remain) { + struct drm_i915_gem_relocation_entry *r = stack_reloc; + int count = remain; + if (count > DRM_ARRAY_SIZE(stack_reloc)) + count = DRM_ARRAY_SIZE(stack_reloc); + remain -= count; - ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); + ret = -copyin_nofault(user_relocs, r, count*sizeof(r[0])); if (ret != 0) return (ret); - ret = -copyout_nofault(&reloc.presumed_offset, - &user_relocs[i].presumed_offset, - sizeof(reloc.presumed_offset)); - if (ret != 0) - return (ret); - } + do { + u64 offset = r->presumed_offset; + + ret = i915_gem_execbuffer_relocate_entry(obj, eb, r); + if (ret) + return ret; + if (r->presumed_offset != offset && + copyout_nofault(&r->presumed_offset, + &user_relocs->presumed_offset, + sizeof(r->presumed_offset))) { + return -EFAULT; + } + + user_relocs++; + r++; + } while (--count); + } +#undef N_RELOC return (0); } static int i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, struct eb_objects *eb, struct drm_i915_gem_relocation_entry *relocs) { const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; int i, ret; for (i = 0; i < entry->relocation_count; i++) { ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); if (ret) return ret; } return 0; } static int i915_gem_execbuffer_relocate(struct drm_device *dev, struct eb_objects *eb, struct list_head *objects) { struct drm_i915_gem_object *obj; int ret, pflags; /* Try to move as many of the relocation targets off the active list * to avoid unnecessary fallbacks to the slow path, as we cannot wait * for the retirement with pagefaults disabled. */ i915_gem_retire_requests(dev); ret = 0; pflags = vm_fault_disable_pagefaults(); /* This is the fast path and we cannot handle a pagefault whilst * holding the device lock lest the user pass in the relocations * contained within a mmaped bo. For in such a case we, the page * fault handler would call i915_gem_fault() and we would try to * acquire the device lock again. Obviously this is bad. 
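 *
 * (Shape of the whole mechanism, sketched for illustration only:
 *
 *	pflags = vm_fault_disable_pagefaults();
 *	relocate every object using copyin_nofault()/copyout_nofault();
 *	vm_fault_enable_pagefaults(pflags);
 *
 * If that fast path reports -EFAULT, i915_gem_do_execbuffer() falls back to
 * i915_gem_execbuffer_relocate_slow(), which drops the lock and copies all
 * relocation entries up front before retrying.  Either way a user-triggered
 * fault can never recurse into i915_gem_fault() while the device lock is
 * held.)
 *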
*/ list_for_each_entry(obj, objects, exec_list) { ret = i915_gem_execbuffer_relocate_object(obj, eb); if (ret != 0) break; } vm_fault_enable_pagefaults(pflags); return (ret); } #define __EXEC_OBJECT_HAS_FENCE (1<<31) static int +need_reloc_mappable(struct drm_i915_gem_object *obj) +{ + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + return entry->relocation_count && !use_cpu_reloc(obj); +} + +static int pin_and_fence_object(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring) { struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool need_fence, need_mappable; int ret; need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = - entry->relocation_count ? true : need_fence; + need_mappable = need_fence || need_reloc_mappable(obj); ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); if (ret) return ret; if (has_fenced_gpu_access) { if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { - if (obj->tiling_mode) { - ret = i915_gem_object_get_fence(obj, ring); - if (ret) - goto err_unpin; + ret = i915_gem_object_get_fence(obj); + if (ret) + goto err_unpin; + if (i915_gem_object_pin_fence(obj)) entry->flags |= __EXEC_OBJECT_HAS_FENCE; - i915_gem_object_pin_fence(obj); - } else { - ret = i915_gem_object_put_fence(obj); - if (ret) - goto err_unpin; - } + obj->pending_fenced_gpu_access = true; } } entry->offset = obj->gtt_offset; return 0; err_unpin: i915_gem_object_unpin(obj); return ret; } static int i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct drm_file *file, struct list_head *objects) { drm_i915_private_t *dev_priv; struct drm_i915_gem_object *obj; int ret, retry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; struct list_head ordered_objects; dev_priv = ring->dev->dev_private; INIT_LIST_HEAD(&ordered_objects); while (!list_empty(objects)) { struct drm_i915_gem_exec_object2 *entry; bool need_fence, need_mappable; obj = list_first_entry(objects, struct drm_i915_gem_object, exec_list); entry = obj->exec_entry; need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = - entry->relocation_count ? true : need_fence; + need_mappable = need_fence || need_reloc_mappable(obj); if (need_mappable) list_move(&obj->exec_list, &ordered_objects); else list_move_tail(&obj->exec_list, &ordered_objects); obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; } list_splice(&ordered_objects, objects); /* Attempt to pin all of the buffers into the GTT. * This is done in 3 phases: * * 1a. Unbind all objects that do not match the GTT constraints for * the execbuffer (fenceable, mappable, alignment etc). * 1b. Increment pin count for already bound objects and obtain * a fence register if required. * 2. Bind new objects. * 3. Decrement pin count. * * This avoid unnecessary unbinding of later objects in order to makr * room for the earlier objects *unless* we need to defragment. */ retry = 0; do { ret = 0; /* Unbind any ill-fitting objects or pin. */ list_for_each_entry(obj, objects, exec_list) { struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; bool need_fence, need_mappable; if (!obj->gtt_space) continue; need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = - entry->relocation_count ? 
true : need_fence; + need_mappable = need_fence || need_reloc_mappable(obj); if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || (need_mappable && !obj->map_and_fenceable)) ret = i915_gem_object_unbind(obj); else ret = pin_and_fence_object(obj, ring); if (ret) goto err; } /* Bind fresh objects */ list_for_each_entry(obj, objects, exec_list) { if (obj->gtt_space) continue; ret = pin_and_fence_object(obj, ring); if (ret) { int ret_ignore; /* This can potentially raise a harmless * -EINVAL if we failed to bind in the above * call. It cannot raise -EINTR since we know * that the bo is freshly bound and so will * not need to be flushed or waited upon. */ ret_ignore = i915_gem_object_unbind(obj); (void)ret_ignore; if (obj->gtt_space != NULL) printf("%s: gtt_space\n", __func__); break; } } /* Decrement pin count for bound objects */ list_for_each_entry(obj, objects, exec_list) { struct drm_i915_gem_exec_object2 *entry; if (!obj->gtt_space) continue; entry = obj->exec_entry; if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { i915_gem_object_unpin_fence(obj); entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; } i915_gem_object_unpin(obj); /* ... and ensure ppgtt mapping exist if needed. */ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, obj->cache_level); obj->has_aliasing_ppgtt_mapping = 1; } } if (ret != -ENOSPC || retry > 1) return ret; /* First attempt, just clear anything that is purgeable. * Second attempt, clear the entire GTT. */ ret = i915_gem_evict_everything(ring->dev, retry == 0); if (ret) return ret; retry++; } while (1); err: list_for_each_entry_continue_reverse(obj, objects, exec_list) { struct drm_i915_gem_exec_object2 *entry; if (!obj->gtt_space) continue; entry = obj->exec_entry; if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { i915_gem_object_unpin_fence(obj); entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; } i915_gem_object_unpin(obj); } return ret; } static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring, struct list_head *objects, struct eb_objects *eb, struct drm_i915_gem_exec_object2 *exec, int count) { struct drm_i915_gem_relocation_entry *reloc; struct drm_i915_gem_object *obj; int *reloc_offset; int i, total, ret; /* We may process another execbuffer during the unlock... 
*/ while (!list_empty(objects)) { obj = list_first_entry(objects, struct drm_i915_gem_object, exec_list); list_del_init(&obj->exec_list); drm_gem_object_unreference(&obj->base); } DRM_UNLOCK(dev); total = 0; for (i = 0; i < count; i++) total += exec[i].relocation_count; reloc_offset = malloc(count * sizeof(*reloc_offset), DRM_I915_GEM, M_WAITOK | M_ZERO); reloc = malloc(total * sizeof(*reloc), DRM_I915_GEM, M_WAITOK | M_ZERO); total = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_relocation_entry *user_relocs; user_relocs = (void *)(uintptr_t)exec[i].relocs_ptr; ret = -copyin(user_relocs, reloc + total, exec[i].relocation_count * sizeof(*reloc)); if (ret != 0) { DRM_LOCK(dev); goto err; } reloc_offset[i] = total; total += exec[i].relocation_count; } ret = i915_mutex_lock_interruptible(dev); if (ret) { DRM_LOCK(dev); goto err; } /* reacquire the objects */ eb_reset(eb); for (i = 0; i < count; i++) { struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(dev, file, exec[i].handle)); if (&obj->base == NULL) { DRM_DEBUG("Invalid object handle %d at index %d\n", exec[i].handle, i); ret = -ENOENT; goto err; } list_add_tail(&obj->exec_list, objects); obj->exec_handle = exec[i].handle; obj->exec_entry = &exec[i]; eb_add_object(eb, obj); } ret = i915_gem_execbuffer_reserve(ring, file, objects); if (ret) goto err; list_for_each_entry(obj, objects, exec_list) { int offset = obj->exec_entry - exec; ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, reloc + reloc_offset[offset]); if (ret) goto err; } /* Leave the user relocations as are, this is the painfully slow path, * and we want to avoid the complication of dropping the lock whilst * having buffers reserved in the aperture and so causing spurious * ENOSPC for random operations. 
*/ err: free(reloc, DRM_I915_GEM); free(reloc_offset, DRM_I915_GEM); return ret; } static int i915_gem_execbuffer_flush(struct drm_device *dev, uint32_t invalidate_domains, uint32_t flush_domains, uint32_t flush_rings) { drm_i915_private_t *dev_priv = dev->dev_private; int i, ret; if (flush_domains & I915_GEM_DOMAIN_CPU) intel_gtt_chipset_flush(); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { for (i = 0; i < I915_NUM_RINGS; i++) if (flush_rings & (1 << i)) { ret = i915_gem_flush_ring(&dev_priv->rings[i], invalidate_domains, flush_domains); if (ret) return ret; } } return 0; } -static bool -intel_enable_semaphores(struct drm_device *dev) -{ - if (INTEL_INFO(dev)->gen < 6) - return 0; - - if (i915_semaphores >= 0) - return i915_semaphores; - - /* Enable semaphores on SNB when IO remapping is off */ - if (INTEL_INFO(dev)->gen == 6) - return !intel_iommu_enabled; - - return 1; -} - static int -i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *to) -{ - struct intel_ring_buffer *from = obj->ring; - u32 seqno; - int ret, idx; - - if (from == NULL || to == from) - return 0; - - /* XXX gpu semaphores are implicated in various hard hangs on SNB */ - if (!intel_enable_semaphores(obj->base.dev)) - return i915_gem_object_wait_rendering(obj); - - idx = intel_ring_sync_index(from, to); - - seqno = obj->last_rendering_seqno; - if (seqno <= from->sync_seqno[idx]) - return 0; - - if (seqno == from->outstanding_lazy_request) { - struct drm_i915_gem_request *request; - - request = malloc(sizeof(*request), DRM_I915_GEM, - M_WAITOK | M_ZERO); - ret = i915_add_request(from, NULL, request); - if (ret) { - free(request, DRM_I915_GEM); - return ret; - } - - seqno = request->seqno; - } - - from->sync_seqno[idx] = seqno; - - return to->sync_to(to, from, seqno - 1); -} - -static int i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) { u32 plane, flip_mask; int ret; /* Check for any pending flips. As we only maintain a flip queue depth * of 1, we can simply insert a WAIT for the next display flip prior * to executing the batch and avoid stalling the CPU. 
*/ for (plane = 0; flips >> plane; plane++) { if (((flips >> plane) & 1) == 0) continue; if (plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; ret = intel_ring_begin(ring, 2); if (ret) return ret; intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); } return 0; } static int i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, struct list_head *objects) { struct drm_i915_gem_object *obj; struct change_domains cd; int ret; memset(&cd, 0, sizeof(cd)); list_for_each_entry(obj, objects, exec_list) i915_gem_object_set_to_gpu_domain(obj, ring, &cd); if (cd.invalidate_domains | cd.flush_domains) { #if WATCH_EXEC DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", __func__, cd.invalidate_domains, cd.flush_domains); #endif ret = i915_gem_execbuffer_flush(ring->dev, cd.invalidate_domains, cd.flush_domains, cd.flush_rings); if (ret) return ret; } if (cd.flips) { ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips); if (ret) return ret; } list_for_each_entry(obj, objects, exec_list) { - ret = i915_gem_execbuffer_sync_rings(obj, ring); + ret = i915_gem_object_sync(obj, ring); if (ret) return ret; } return 0; } static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) { return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; } static int validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count, vm_page_t ***map) { vm_page_t *ma; int i, length, page_count; /* XXXKIB various limits checking is missing there */ *map = malloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) { /* First check for malicious input causing overflow */ if (exec[i].relocation_count > INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) return -EINVAL; length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); if (length == 0) { (*map)[i] = NULL; continue; } /* * Since both start and end of the relocation region * may be not aligned on the page boundary, be * conservative and request a page slot for each * partial page. Thus +2. 
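 *
 * (Worked example, for illustration: with PAGE_SIZE = 4096, a relocation
 * array of 4100 bytes gives howmany(4100, 4096) = 2, yet if it happens to
 * start two bytes before a page boundary it touches three physical pages
 * (2 + 4096 + 2 bytes); the "+ 2" keeps the page-slot array large enough
 * for any alignment of both ends.)
 *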
*/ page_count = howmany(length, PAGE_SIZE) + 2; ma = (*map)[i] = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM, M_WAITOK | M_ZERO); if (vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, exec[i].relocs_ptr, length, VM_PROT_READ | VM_PROT_WRITE, ma, page_count) == -1) { free(ma, DRM_I915_GEM); (*map)[i] = NULL; return (-EFAULT); } } return 0; } static void i915_gem_execbuffer_move_to_active(struct list_head *objects, struct intel_ring_buffer *ring, u32 seqno) { struct drm_i915_gem_object *obj; uint32_t old_read, old_write; list_for_each_entry(obj, objects, exec_list) { old_read = obj->base.read_domains; old_write = obj->base.write_domain; obj->base.read_domains = obj->base.pending_read_domains; obj->base.write_domain = obj->base.pending_write_domain; obj->fenced_gpu_access = obj->pending_fenced_gpu_access; i915_gem_object_move_to_active(obj, ring, seqno); if (obj->base.write_domain) { obj->dirty = 1; obj->pending_gpu_write = true; list_move_tail(&obj->gpu_write_list, &ring->gpu_write_list); - intel_mark_busy(ring->dev, obj); + if (obj->pin_count) /* check for potential scanout */ + intel_mark_busy(ring->dev, obj); } CTR3(KTR_DRM, "object_change_domain move_to_active %p %x %x", obj, old_read, old_write); } + + intel_mark_busy(ring->dev, NULL); } int i915_gem_sync_exec_requests; static void i915_gem_execbuffer_retire_commands(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring) { struct drm_i915_gem_request *request; u32 invalidate; /* * Ensure that the commands in the batch buffer are * finished before the interrupt fires. * * The sampler always gets flushed on i965 (sigh). */ invalidate = I915_GEM_DOMAIN_COMMAND; if (INTEL_INFO(dev)->gen >= 4) invalidate |= I915_GEM_DOMAIN_SAMPLER; if (ring->flush(ring, invalidate, 0)) { i915_gem_next_request_seqno(ring); return; } /* Add a breadcrumb for the completion of the batch buffer */ request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO); if (request == NULL || i915_add_request(ring, file, request)) { i915_gem_next_request_seqno(ring); free(request, DRM_I915_GEM); - } else if (i915_gem_sync_exec_requests) - i915_wait_request(ring, request->seqno, true); + } else if (i915_gem_sync_exec_requests) { + i915_wait_request(ring, request->seqno); + i915_gem_retire_requests(dev); + } } static void i915_gem_fix_mi_batchbuffer_end(struct drm_i915_gem_object *batch_obj, uint32_t batch_start_offset, uint32_t batch_len) { char *mkva; uint64_t po_r, po_w; uint32_t cmd; po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset + batch_start_offset + batch_len; if (batch_len > 0) po_r -= 4; mkva = pmap_mapdev_attr(trunc_page(po_r), 2 * PAGE_SIZE, PAT_WRITE_COMBINING); po_r &= PAGE_MASK; cmd = *(uint32_t *)(mkva + po_r); if (cmd != MI_BATCH_BUFFER_END) { /* * batch_len != 0 due to the check at the start of * i915_gem_do_execbuffer */ if (batch_obj->base.size > batch_start_offset + batch_len) { po_w = po_r + 4; /* DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END !\n"); */ } else { po_w = po_r; DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END, overwriting last bo cmd !\n"); } *(uint32_t *)(mkva + po_w) = MI_BATCH_BUFFER_END; } pmap_unmapdev((vm_offset_t)mkva, 2 * PAGE_SIZE); } int i915_fix_mi_batchbuffer_end = 0; static int i915_reset_gen7_sol_offsets(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; int ret, i; if (!IS_GEN7(dev) || ring != &dev_priv->rings[RCS]) return 0; ret = intel_ring_begin(ring, 4 * 3); if (ret) return ret; for (i = 
0; i < 4; i++) { intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i)); intel_ring_emit(ring, 0); } intel_ring_advance(ring); return 0; } static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_exec_object2 *exec) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head objects; struct eb_objects *eb; struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; struct intel_ring_buffer *ring; vm_page_t **relocs_ma; u32 ctx_id = i915_execbuffer2_get_context_id(*args); u32 exec_start, exec_len; u32 seqno; u32 mask; int ret, mode, i; if (!i915_gem_check_execbuffer(args)) { DRM_DEBUG("execbuf with invalid offset/length\n"); return -EINVAL; } if (args->batch_len == 0) return (0); ret = validate_exec_list(exec, args->buffer_count, &relocs_ma); if (ret != 0) goto pre_struct_lock_err; switch (args->flags & I915_EXEC_RING_MASK) { case I915_EXEC_DEFAULT: case I915_EXEC_RENDER: ring = &dev_priv->rings[RCS]; break; case I915_EXEC_BSD: - if (!HAS_BSD(dev)) { - DRM_DEBUG("execbuf with invalid ring (BSD)\n"); - return -EINVAL; - } ring = &dev_priv->rings[VCS]; if (ctx_id != 0) { DRM_DEBUG("Ring %s doesn't support contexts\n", ring->name); return -EPERM; } break; case I915_EXEC_BLT: - if (!HAS_BLT(dev)) { - DRM_DEBUG("execbuf with invalid ring (BLT)\n"); - return -EINVAL; - } ring = &dev_priv->rings[BCS]; if (ctx_id != 0) { DRM_DEBUG("Ring %s doesn't support contexts\n", ring->name); return -EPERM; } break; default: DRM_DEBUG("execbuf with unknown ring: %d\n", (int)(args->flags & I915_EXEC_RING_MASK)); ret = -EINVAL; goto pre_struct_lock_err; } + if (!intel_ring_initialized(ring)) { + DRM_DEBUG("execbuf with invalid ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); + return -EINVAL; + } mode = args->flags & I915_EXEC_CONSTANTS_MASK; mask = I915_EXEC_CONSTANTS_MASK; switch (mode) { case I915_EXEC_CONSTANTS_REL_GENERAL: case I915_EXEC_CONSTANTS_ABSOLUTE: case I915_EXEC_CONSTANTS_REL_SURFACE: if (ring == &dev_priv->rings[RCS] && mode != dev_priv->relative_constants_mode) { if (INTEL_INFO(dev)->gen < 4) { ret = -EINVAL; goto pre_struct_lock_err; } if (INTEL_INFO(dev)->gen > 5 && mode == I915_EXEC_CONSTANTS_REL_SURFACE) { ret = -EINVAL; goto pre_struct_lock_err; } /* The HW changed the meaning on this bit on gen6 */ if (INTEL_INFO(dev)->gen >= 6) mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; } break; default: DRM_DEBUG("execbuf with unknown constants: %d\n", mode); ret = -EINVAL; goto pre_struct_lock_err; } if (args->buffer_count < 1) { DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); ret = -EINVAL; goto pre_struct_lock_err; } if (args->num_cliprects != 0) { if (ring != &dev_priv->rings[RCS]) { DRM_DEBUG("clip rectangles are only valid with the render ring\n"); ret = -EINVAL; goto pre_struct_lock_err; } + if (INTEL_INFO(dev)->gen >= 5) { + DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); + ret = -EINVAL; + goto pre_struct_lock_err; + } + if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { DRM_DEBUG("execbuf with %u cliprects\n", args->num_cliprects); ret = -EINVAL; goto pre_struct_lock_err; } cliprects = malloc( sizeof(*cliprects) * args->num_cliprects, DRM_I915_GEM, M_WAITOK | M_ZERO); ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects, sizeof(*cliprects) * args->num_cliprects); if (ret != 0) goto pre_struct_lock_err; } ret = i915_mutex_lock_interruptible(dev); if (ret) goto pre_struct_lock_err; if 
(dev_priv->mm.suspended) { ret = -EBUSY; goto struct_lock_err; } eb = eb_create(args->buffer_count); if (eb == NULL) { ret = -ENOMEM; goto struct_lock_err; } /* Look up object handles */ INIT_LIST_HEAD(&objects); for (i = 0; i < args->buffer_count; i++) { struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(dev, file, exec[i].handle)); if (&obj->base == NULL) { DRM_DEBUG("Invalid object handle %d at index %d\n", exec[i].handle, i); /* prevent error path from reading uninitialized data */ ret = -ENOENT; goto err; } if (!list_empty(&obj->exec_list)) { DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", obj, exec[i].handle, i); ret = -EINVAL; goto err; } list_add_tail(&obj->exec_list, &objects); obj->exec_handle = exec[i].handle; obj->exec_entry = &exec[i]; eb_add_object(eb, obj); } /* take note of the batch buffer before we might reorder the lists */ batch_obj = list_entry(objects.prev, struct drm_i915_gem_object, exec_list); /* Move the objects en-masse into the GTT, evicting if necessary. */ ret = i915_gem_execbuffer_reserve(ring, file, &objects); if (ret) goto err; /* The objects are in their final locations, apply the relocations. */ ret = i915_gem_execbuffer_relocate(dev, eb, &objects); if (ret) { if (ret == -EFAULT) { ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, &objects, eb, exec, args->buffer_count); DRM_LOCK_ASSERT(dev); } if (ret) goto err; } /* Set the pending read domains for the batch buffer to COMMAND */ if (batch_obj->base.pending_write_domain) { DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); ret = -EINVAL; goto err; } batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); if (ret) goto err; ret = i915_switch_context(ring, file, ctx_id); if (ret) goto err; seqno = i915_gem_next_request_seqno(ring); for (i = 0; i < I915_NUM_RINGS - 1; i++) { if (seqno < ring->sync_seqno[i]) { /* The GPU can not handle its semaphore value wrapping, * so every billion or so execbuffers, we need to stall * the GPU in order to reset the counters. 
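The stall described above exists because the ring-to-ring semaphore registers compare raw 32-bit seqno values, while software normally compares seqnos wrap-safely with signed arithmetic. A minimal standalone sketch of that wrap-safe comparison (the usual i915 idiom, shown here for illustration only, not part of this change):

#include <stdbool.h>
#include <stdint.h>

static bool
seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* true when seq1 is at or after seq2, even across a 2^32 wrap */
	return ((int32_t)(seq1 - seq2) >= 0);
}

The hardware semaphores cannot apply this signed trick, hence the periodic idle to let sync_seqno fall back to zero.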
*/ - ret = i915_gpu_idle(dev, true); + ret = i915_gpu_idle(dev); if (ret) goto err; + i915_gem_retire_requests(dev); KASSERT(ring->sync_seqno[i] == 0, ("Non-zero sync_seqno")); } } if (ring == &dev_priv->rings[RCS] && mode != dev_priv->relative_constants_mode) { ret = intel_ring_begin(ring, 4); if (ret) goto err; intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, INSTPM); intel_ring_emit(ring, mask << 16 | mode); intel_ring_advance(ring); dev_priv->relative_constants_mode = mode; } if (args->flags & I915_EXEC_GEN7_SOL_RESET) { ret = i915_reset_gen7_sol_offsets(dev, ring); if (ret) goto err; } exec_start = batch_obj->gtt_offset + args->batch_start_offset; exec_len = args->batch_len; if (i915_fix_mi_batchbuffer_end) { i915_gem_fix_mi_batchbuffer_end(batch_obj, args->batch_start_offset, args->batch_len); } CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno, exec_start, exec_len); if (cliprects) { for (i = 0; i < args->num_cliprects; i++) { ret = i915_emit_box_p(dev, &cliprects[i], args->DR1, args->DR4); if (ret) goto err; ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); if (ret) goto err; } } else { ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); if (ret) goto err; } i915_gem_execbuffer_move_to_active(&objects, ring, seqno); i915_gem_execbuffer_retire_commands(dev, file, ring); err: eb_destroy(eb); while (!list_empty(&objects)) { struct drm_i915_gem_object *obj; obj = list_first_entry(&objects, struct drm_i915_gem_object, exec_list); list_del_init(&obj->exec_list); drm_gem_object_unreference(&obj->base); } struct_lock_err: DRM_UNLOCK(dev); pre_struct_lock_err: for (i = 0; i < args->buffer_count; i++) { if (relocs_ma[i] != NULL) { vm_page_unhold_pages(relocs_ma[i], howmany( exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry), PAGE_SIZE)); free(relocs_ma[i], DRM_I915_GEM); } } free(relocs_ma, DRM_I915_GEM); free(cliprects, DRM_I915_GEM); return ret; } /* * Legacy execbuffer just creates an exec2 list from the original exec object * list array and passes it to the real function. 
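i915_gem_do_execbuffer() above sizes several allocations from user-supplied counts and guards each multiplication against overflow (for example the num_cliprects check). A standalone form of that guard, as a sketch:

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

static bool
count_fits(unsigned int count, size_t elem_size)
{
	/* reject counts whose total byte size would overflow the multiply */
	return (count <= UINT_MAX / elem_size);
}

count_fits(num_cliprects, sizeof(struct drm_clip_rect)) mirrors the in-tree test before the malloc.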
*/ int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_execbuffer2 exec2; struct drm_i915_gem_exec_object *exec_list = NULL; struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret, i; DRM_DEBUG("buffers_ptr %d buffer_count %d len %08x\n", (int) args->buffers_ptr, args->buffer_count, args->batch_len); if (args->buffer_count < 1) { DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); return -EINVAL; } /* Copy in the exec list from userland */ /* XXXKIB user-controlled malloc size */ exec_list = malloc(sizeof(*exec_list) * args->buffer_count, DRM_I915_GEM, M_WAITOK); exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count, DRM_I915_GEM, M_WAITOK); ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec_list, sizeof(*exec_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", args->buffer_count, ret); free(exec_list, DRM_I915_GEM); free(exec2_list, DRM_I915_GEM); return (ret); } for (i = 0; i < args->buffer_count; i++) { exec2_list[i].handle = exec_list[i].handle; exec2_list[i].relocation_count = exec_list[i].relocation_count; exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; exec2_list[i].alignment = exec_list[i].alignment; exec2_list[i].offset = exec_list[i].offset; if (INTEL_INFO(dev)->gen < 4) exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; else exec2_list[i].flags = 0; } exec2.buffers_ptr = args->buffers_ptr; exec2.buffer_count = args->buffer_count; exec2.batch_start_offset = args->batch_start_offset; exec2.batch_len = args->batch_len; exec2.DR1 = args->DR1; exec2.DR4 = args->DR4; exec2.num_cliprects = args->num_cliprects; exec2.cliprects_ptr = args->cliprects_ptr; exec2.flags = I915_EXEC_RENDER; i915_execbuffer2_set_context_id(exec2, 0); ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ for (i = 0; i < args->buffer_count; i++) exec_list[i].offset = exec2_list[i].offset; /* ... and back out to userspace */ ret = -copyout(exec_list, (void *)(uintptr_t)args->buffers_ptr, sizeof(*exec_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("failed to copy %d exec entries " "back to user (%d)\n", args->buffer_count, ret); } } free(exec_list, DRM_I915_GEM); free(exec2_list, DRM_I915_GEM); return ret; } int i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; DRM_DEBUG("buffers_ptr %jx buffer_count %d len %08x\n", (uintmax_t)args->buffers_ptr, args->buffer_count, args->batch_len); if (args->buffer_count < 1 || args->buffer_count > UINT_MAX / sizeof(*exec2_list)) { DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); return -EINVAL; } /* XXXKIB user-controllable malloc size */ exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count, DRM_I915_GEM, M_WAITOK); ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec2_list, sizeof(*exec2_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", args->buffer_count, ret); free(exec2_list, DRM_I915_GEM); return (ret); } ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
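The negated copyin()/copyout() calls in these handlers convert FreeBSD's positive errno returns into the Linux-style negative errors the DRM ioctl layer expects. A user-space model of that convention (copy_to_user_model() is a hypothetical stand-in, not a kernel API):

#include <errno.h>
#include <string.h>

static int
copy_to_user_model(void *dst, const void *src, size_t len)
{
	if (dst == NULL)
		return (EFAULT);	/* positive errno, FreeBSD style */
	memcpy(dst, src, len);
	return (0);
}

static int
ioctl_style_copy(void *dst, const void *src, size_t len)
{
	return (-copy_to_user_model(dst, src, len));	/* 0 or -EFAULT */
}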
*/ ret = -copyout(exec2_list, (void *)(uintptr_t)args->buffers_ptr, sizeof(*exec2_list) * args->buffer_count); if (ret) { DRM_DEBUG("failed to copy %d exec entries " "back to user (%d)\n", args->buffer_count, ret); } } free(exec2_list, DRM_I915_GEM); return ret; } Index: head/sys/dev/drm2/i915/i915_gem_gtt.c =================================================================== --- head/sys/dev/drm2/i915/i915_gem_gtt.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_gem_gtt.c (revision 277487) @@ -1,334 +1,364 @@ /* * Copyright © 2010 Daniel Vetter * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include /* PPGTT support for Sandybdrige/Gen6 and later */ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, unsigned first_entry, unsigned num_entries) { uint32_t *pt_vaddr; uint32_t scratch_pte; struct sf_buf *sf; unsigned act_pd, first_pte, last_pte, i; act_pd = first_entry / I915_PPGTT_PT_ENTRIES; first_pte = first_entry % I915_PPGTT_PT_ENTRIES; scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; while (num_entries) { last_pte = first_pte + num_entries; if (last_pte > I915_PPGTT_PT_ENTRIES) last_pte = I915_PPGTT_PT_ENTRIES; sched_pin(); sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE); pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf); for (i = first_pte; i < last_pte; i++) pt_vaddr[i] = scratch_pte; sf_buf_free(sf); sched_unpin(); num_entries -= last_pte - first_pte; first_pte = 0; act_pd++; } } int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) { struct drm_i915_private *dev_priv; struct i915_hw_ppgtt *ppgtt; u_int first_pd_entry_in_global_pt, i; dev_priv = dev->dev_private; /* * ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 * entries. For aliasing ppgtt support we just steal them at the end for * now. 
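The clear and insert loops below walk the per-process page tables by splitting a linear GTT page index into a page-directory slot and a PTE slot, clamping each run to a single table. A standalone sketch of that walk, assuming the usual 4 KiB tables of 1024 four-byte PTEs (the in-tree constant is I915_PPGTT_PT_ENTRIES):

#include <stdio.h>

#define PT_ENTRIES 1024	/* assumed value of I915_PPGTT_PT_ENTRIES */

static void
walk_entries(unsigned int first_entry, unsigned int num_entries)
{
	unsigned int pd = first_entry / PT_ENTRIES;
	unsigned int pte = first_entry % PT_ENTRIES;

	while (num_entries != 0) {
		unsigned int last = pte + num_entries;

		if (last > PT_ENTRIES)
			last = PT_ENTRIES;
		printf("table %u: PTEs %u..%u\n", pd, pte, last - 1);
		num_entries -= last - pte;
		pte = 0;
		pd++;
	}
}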
*/ first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES; ppgtt = malloc(sizeof(*ppgtt), DRM_I915_GEM, M_WAITOK | M_ZERO); ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; ppgtt->pt_pages = malloc(sizeof(vm_page_t) * ppgtt->num_pd_entries, DRM_I915_GEM, M_WAITOK | M_ZERO); for (i = 0; i < ppgtt->num_pd_entries; i++) { ppgtt->pt_pages[i] = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (ppgtt->pt_pages[i] == NULL) { dev_priv->mm.aliasing_ppgtt = ppgtt; i915_gem_cleanup_aliasing_ppgtt(dev); return (-ENOMEM); } } ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt.scratch_page_dma; i915_ppgtt_clear_range(ppgtt, 0, ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); ppgtt->pd_offset = (first_pd_entry_in_global_pt) * sizeof(uint32_t); dev_priv->mm.aliasing_ppgtt = ppgtt; return (0); } static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry, unsigned num_entries, vm_page_t *pages, uint32_t pte_flags) { uint32_t *pt_vaddr, pte; struct sf_buf *sf; unsigned act_pd, first_pte; unsigned last_pte, i; vm_paddr_t page_addr; act_pd = first_entry / I915_PPGTT_PT_ENTRIES; first_pte = first_entry % I915_PPGTT_PT_ENTRIES; while (num_entries) { last_pte = first_pte + num_entries; if (last_pte > I915_PPGTT_PT_ENTRIES) last_pte = I915_PPGTT_PT_ENTRIES; sched_pin(); sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE); pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf); for (i = first_pte; i < last_pte; i++) { page_addr = VM_PAGE_TO_PHYS(*pages); pte = GEN6_PTE_ADDR_ENCODE(page_addr); pt_vaddr[i] = pte | pte_flags; pages++; } sf_buf_free(sf); sched_unpin(); num_entries -= last_pte - first_pte; first_pte = 0; act_pd++; } } void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct drm_device *dev; struct drm_i915_private *dev_priv; uint32_t pte_flags; dev = obj->base.dev; dev_priv = dev->dev_private; pte_flags = GEN6_PTE_VALID; switch (cache_level) { case I915_CACHE_LLC_MLC: pte_flags |= GEN6_PTE_CACHE_LLC_MLC; break; case I915_CACHE_LLC: pte_flags |= GEN6_PTE_CACHE_LLC; break; case I915_CACHE_NONE: pte_flags |= GEN6_PTE_UNCACHED; break; default: panic("cache mode"); } i915_ppgtt_insert_pages(ppgtt, obj->gtt_space->start >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT, obj->pages, pte_flags); } void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj) { i915_ppgtt_clear_range(ppgtt, obj->gtt_space->start >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT); } void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) { struct drm_i915_private *dev_priv; struct i915_hw_ppgtt *ppgtt; vm_page_t m; int i; dev_priv = dev->dev_private; ppgtt = dev_priv->mm.aliasing_ppgtt; if (ppgtt == NULL) return; dev_priv->mm.aliasing_ppgtt = NULL; for (i = 0; i < ppgtt->num_pd_entries; i++) { m = ppgtt->pt_pages[i]; if (m != NULL) { vm_page_unwire(m, PQ_INACTIVE); vm_page_free(m); } } free(ppgtt->pt_pages, DRM_I915_GEM); free(ppgtt, DRM_I915_GEM); } static unsigned int cache_level_to_agp_type(struct drm_device *dev, enum i915_cache_level cache_level) { switch (cache_level) { case I915_CACHE_LLC_MLC: if (INTEL_INFO(dev)->gen >= 6) return (AGP_USER_CACHED_MEMORY_LLC_MLC); /* * Older chipsets do not have this extra level of CPU * cacheing, so fallthrough and request the PTE simply * as cached. 
*/ case I915_CACHE_LLC: return (AGP_USER_CACHED_MEMORY); default: case I915_CACHE_NONE: return (AGP_USER_MEMORY); } } static bool do_idling(struct drm_i915_private *dev_priv) { bool ret = dev_priv->mm.interruptible; if (dev_priv->mm.gtt.do_idle_maps) { dev_priv->mm.interruptible = false; - if (i915_gpu_idle(dev_priv->dev, false)) { + if (i915_gpu_idle(dev_priv->dev)) { DRM_ERROR("Couldn't idle GPU\n"); /* Wait a bit, in hopes it avoids the hang */ DELAY(10); } } return ret; } static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) { if (dev_priv->mm.gtt.do_idle_maps) dev_priv->mm.interruptible = interruptible; } void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv; struct drm_i915_gem_object *obj; dev_priv = dev->dev_private; /* First fill our portion of the GTT with scratch pages */ intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { i915_gem_clflush_object(obj); - i915_gem_gtt_rebind_object(obj, obj->cache_level); + i915_gem_gtt_bind_object(obj, obj->cache_level); } intel_gtt_chipset_flush(); } int -i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) +i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) { - unsigned int agp_type; - agp_type = cache_level_to_agp_type(obj->base.dev, obj->cache_level); - intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT, obj->pages, agp_type); - - obj->has_global_gtt_mapping = 1; - return (0); } void -i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, +i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct drm_device *dev; struct drm_i915_private *dev_priv; unsigned int agp_type; dev = obj->base.dev; dev_priv = dev->dev_private; agp_type = cache_level_to_agp_type(dev, cache_level); intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT, obj->pages, agp_type); - obj->has_global_gtt_mapping = 0; + obj->has_global_gtt_mapping = 1; } void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { + + intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT); + + obj->has_global_gtt_mapping = 0; +} + +void +i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) +{ struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; bool interruptible; dev = obj->base.dev; dev_priv = dev->dev_private; interruptible = do_idling(dev_priv); - intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT); - undo_idling(dev_priv, interruptible); +} + +int +i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start, + unsigned long mappable_end, unsigned long end) +{ + drm_i915_private_t *dev_priv; + unsigned long mappable; + int error; + + dev_priv = dev->dev_private; + mappable = min(end, mappable_end) - start; + + /* Substract the guard page ... */ + drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); + + dev_priv->mm.gtt_start = start; + dev_priv->mm.gtt_mappable_end = mappable_end; + dev_priv->mm.gtt_end = end; + dev_priv->mm.gtt_total = end - start; + dev_priv->mm.mappable_gtt_total = mappable; + + /* ... but ensure that we clear the entire range. 
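i915_gem_init_global_gtt() below carves up the aperture with simple arithmetic: the drm_mm allocator manages the whole range minus one guard page, and only the part below mappable_end is CPU-visible through the aperture. A worked example with illustrative numbers (not taken from any particular device):

#include <stdio.h>

#define GTT_PAGE_SIZE 4096UL

int
main(void)
{
	unsigned long start = 0;
	unsigned long end = 2048UL << 20;		/* 2 GiB of GTT space */
	unsigned long mappable_end = 256UL << 20;	/* 256 MiB aperture */
	unsigned long mappable = (end < mappable_end ? end : mappable_end) - start;
	unsigned long managed = end - start - GTT_PAGE_SIZE; /* guard page held back */

	printf("allocator manages %lu bytes, %lu CPU-mappable\n", managed, mappable);
	return (0);
}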
*/ + intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); + device_printf(dev->device, + "taking over the fictitious range 0x%lx-0x%lx\n", + dev->agp->base + start, dev->agp->base + start + mappable); + error = -vm_phys_fictitious_reg_range(dev->agp->base + start, + dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING); + return (error); } Index: head/sys/dev/drm2/i915/i915_gem_stolen.c =================================================================== --- head/sys/dev/drm2/i915/i915_gem_stolen.c (nonexistent) +++ head/sys/dev/drm2/i915/i915_gem_stolen.c (revision 277487) @@ -0,0 +1,205 @@ +/* + * Copyright © 2008-2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Chris Wilson + * + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include + +/* + * The BIOS typically reserves some of the system's memory for the exclusive + * use of the integrated graphics. This memory is no longer available for + * use by the OS and so the user finds that his system has less memory + * available than he put in. We refer to this memory as stolen. + * + * The BIOS will allocate its framebuffer from the stolen memory. Our + * goal is try to reuse that object for our own fbcon which must always + * be available for panics. Anything else we can reuse the stolen memory + * for is a boon. + */ + +#define PTE_ADDRESS_MASK 0xfffff000 +#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ +#define PTE_MAPPING_TYPE_UNCACHED (0 << 1) +#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ +#define PTE_MAPPING_TYPE_CACHED (3 << 1) +#define PTE_MAPPING_TYPE_MASK (3 << 1) +#define PTE_VALID (1 << 0) + +/** + * i915_stolen_to_phys - take an offset into stolen memory and turn it into + * a physical one + * @dev: drm device + * @offset: address to translate + * + * Some chip functions require allocations from stolen space and need the + * physical address of the memory in question. + */ +static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + device_t pdev = dev_priv->bridge_dev; + u32 base; + +#if 0 + /* On the machines I have tested the Graphics Base of Stolen Memory + * is unreliable, so compute the base by subtracting the stolen memory + * from the Top of Low Usable DRAM which is where the BIOS places + * the graphics stolen memory. 
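The live branch below derives the stolen-memory base from PCI config space: it reads the top-of-low-usable-DRAM field, scales it to bytes, and subtracts the stolen size. A standalone sketch of that arithmetic with hypothetical register values (the shift widths mirror the code that follows):

#include <stdio.h>

int
main(void)
{
	unsigned int stolen_size = 32u << 20;		/* e.g. 32 MiB reserved by the BIOS */
	unsigned int tolud_reg = 0x0800;		/* hypothetical 16-bit read at 0xb0 */
	unsigned int top = (tolud_reg >> 4) << 20;	/* top of low usable DRAM */
	unsigned int stolen_base = top - stolen_size;

	printf("stolen memory starts at 0x%08x\n", stolen_base);
	return (0);
}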
+ */ + if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { + /* top 32bits are reserved = 0 */ + pci_read_config_dword(pdev, 0xA4, &base); + } else { + /* XXX presume 8xx is the same as i915 */ + pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); + } +#else + if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { + u16 val; + val = pci_read_config(pdev, 0xb0, 2); + base = val >> 4 << 20; + } else { + u8 val; + val = pci_read_config(pdev, 0x9c, 1); + base = val >> 3 << 27; + } + base -= dev_priv->mm.gtt.stolen_size; +#endif + + return base + offset; +} + +static void i915_warn_stolen(struct drm_device *dev) +{ + DRM_INFO("not enough stolen space for compressed buffer, disabling\n"); + DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); +} + +static void i915_setup_compression(struct drm_device *dev, int size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mm_node *compressed_fb, *compressed_llb; + unsigned long cfb_base; + unsigned long ll_base = 0; + + /* Just in case the BIOS is doing something questionable. */ + intel_disable_fbc(dev); + + compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); + if (compressed_fb) + compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); + if (!compressed_fb) + goto err; + + cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); + if (!cfb_base) + goto err_fb; + + if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { + compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, + 4096, 4096, 0); + if (compressed_llb) + compressed_llb = drm_mm_get_block(compressed_llb, + 4096, 4096); + if (!compressed_llb) + goto err_fb; + + ll_base = i915_stolen_to_phys(dev, compressed_llb->start); + if (!ll_base) + goto err_llb; + } + + dev_priv->cfb_size = size; + + dev_priv->compressed_fb = compressed_fb; + if (HAS_PCH_SPLIT(dev)) + I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); + else if (IS_GM45(dev)) { + I915_WRITE(DPFC_CB_BASE, compressed_fb->start); + } else { + I915_WRITE(FBC_CFB_BASE, cfb_base); + I915_WRITE(FBC_LL_BASE, ll_base); + dev_priv->compressed_llb = compressed_llb; + } + + DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", + cfb_base, ll_base, size >> 20); + return; + +err_llb: + drm_mm_put_block(compressed_llb); +err_fb: + drm_mm_put_block(compressed_fb); +err: + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + i915_warn_stolen(dev); +} + +static void i915_cleanup_compression(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + drm_mm_put_block(dev_priv->compressed_fb); + if (dev_priv->compressed_llb) + drm_mm_put_block(dev_priv->compressed_llb); +} + +void i915_gem_cleanup_stolen(struct drm_device *dev) +{ + if (I915_HAS_FBC(dev) && i915_powersave) + i915_cleanup_compression(dev); +} + +int i915_gem_init_stolen(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long prealloc_size = dev_priv->mm.gtt.stolen_size; + + /* Basic memrange allocator for stolen space */ + drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); + + /* Try to set up FBC with a reasonable compressed buffer size */ + if (I915_HAS_FBC(dev) && i915_powersave) { + int cfb_size; + + /* Leave 1M for line length buffer & misc. */ + + /* Try to get a 32M buffer... 
*/ + if (prealloc_size > (36*1024*1024)) + cfb_size = 32*1024*1024; + else /* fall back to 7/8 of the stolen space */ + cfb_size = prealloc_size * 7 / 8; + i915_setup_compression(dev, cfb_size); + } + + return 0; +} Property changes on: head/sys/dev/drm2/i915/i915_gem_stolen.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/drm2/i915/i915_gem_tiling.c =================================================================== --- head/sys/dev/drm2/i915/i915_gem_tiling.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_gem_tiling.c (revision 277487) @@ -1,495 +1,526 @@ /* * Copyright © 2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Eric Anholt * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include /** @file i915_gem_tiling.c * * Support for managing tiling state of buffer objects. * * The idea behind tiling is to increase cache hit rates by rearranging * pixel data so that a group of pixel accesses are in the same cacheline. * Performance improvement from doing this on the back/depth buffer are on * the order of 30%. * * Intel architectures make this somewhat more complicated, though, by * adjustments made to addressing of data when the memory is in interleaved * mode (matched pairs of DIMMS) to improve memory bandwidth. * For interleaved memory, the CPU sends every sequential 64 bytes * to an alternate memory channel so it can get the bandwidth from both. * * The GPU also rearranges its accesses for increased bandwidth to interleaved * memory, and it matches what the CPU does for non-tiled. However, when tiled * it does it a little differently, since one walks addresses not just in the * X direction but also Y. So, along with alternating channels when bit * 6 of the address flips, it also alternates when other bits flip -- Bits 9 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) * are common to both the 915 and 965-class hardware. * * The CPU also sometimes XORs in higher bits as well, to improve * bandwidth doing strided access like we do so frequently in graphics. This * is called "Channel XOR Randomization" in the MCH documentation. 
The result * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address * decode. * * All of this bit 6 XORing has an effect on our memory management, * as we need to make sure that the 3d driver can correctly address object * contents. * * If we don't have interleaved memory, all tiling is safe and no swizzling is * required. * * When bit 17 is XORed in, we simply refuse to tile at all. Bit * 17 is not just a page offset, so as we page an objet out and back in, * individual pages in it will have different bit 17 addresses, resulting in * each 64 bytes being swapped with its neighbor! * * Otherwise, if interleaved, we have to tell the 3d driver what the address * swizzling it needs to do is, since it's writing with the CPU to the pages * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order * to match what the GPU expects. */ /** * Detects bit 6 swizzling of address lookup between IGD access and CPU * access through main memory. */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; if (INTEL_INFO(dev)->gen >= 6) { uint32_t dimm_c0, dimm_c1; dimm_c0 = I915_READ(MAD_DIMM_C0); dimm_c1 = I915_READ(MAD_DIMM_C1); dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; /* Enable swizzling when the channels are populated with * identically sized dimms. We don't need to check the 3rd * channel because no cpu with gpu attached ships in that * configuration. Also, swizzling only makes sense for 2 * channels anyway. */ if (dimm_c0 == dimm_c1) { swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; } else { swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; } } else if (IS_GEN5(dev)) { /* On Ironlake whatever DRAM config, GPU always do * same swizzling setup. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; } else if (IS_GEN2(dev)) { /* As far as we know, the 865 doesn't have these bit 6 * swizzling issues. */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) { uint32_t dcc; /* On 9xx chipsets, channel interleave by the CPU is * determined by DCC. For single-channel, neither the CPU * nor the GPU do swizzling. For dual channel interleaved, * the GPU's interleave is bit 9 and 10 for X tiled, and bit * 9 for Y tiled. The CPU's interleave is independent, and * can be based on either bit 11 (haven't seen this yet) or * bit 17 (common). */ dcc = I915_READ(DCC); switch (dcc & DCC_ADDRESSING_MODE_MASK) { case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; break; case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: if (dcc & DCC_CHANNEL_XOR_DISABLE) { /* This is the base swizzling by the GPU for * tiled buffers. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { /* Bit 11 swizzling by the CPU in addition. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; swizzle_y = I915_BIT_6_SWIZZLE_9_11; } else { /* Bit 17 swizzling by the CPU in addition. 
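For the interleaved cases above, the cumulative swizzle a CPU copy has to apply folds a few higher address bits into bit 6. A small illustrative helper, not part of this commit, using the commonly documented XOR rules for the 9_10 and 9_10_11 modes (names mirror the I915_BIT_6_SWIZZLE_* values):

#include <stdint.h>

enum swizzle_mode { SWIZZLE_NONE, SWIZZLE_9_10, SWIZZLE_9_10_11 };

static uint32_t
swizzle_offset(uint32_t offset, enum swizzle_mode mode)
{
	uint32_t bit6 = 0;

	if (mode == SWIZZLE_9_10 || mode == SWIZZLE_9_10_11)
		bit6 ^= ((offset >> 9) ^ (offset >> 10)) & 1;
	if (mode == SWIZZLE_9_10_11)
		bit6 ^= (offset >> 11) & 1;
	return (offset ^ (bit6 << 6));
}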
*/ swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; swizzle_y = I915_BIT_6_SWIZZLE_9_17; } break; } if (dcc == 0xffffffff) { DRM_ERROR("Couldn't read from MCHBAR. " "Disabling tiling.\n"); swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } } else { /* The 965, G33, and newer, have a very flexible memory * configuration. It will enable dual-channel mode * (interleaving) on as much memory as it can, and the GPU * will additionally sometimes enable different bit 6 * swizzling for tiled objects from the CPU. * * Here's what I found on the G965: * slot fill memory size swizzling * 0A 0B 1A 1B 1-ch 2-ch * 512 0 0 0 512 0 O * 512 0 512 0 16 1008 X * 512 0 0 512 16 1008 X * 0 512 0 512 16 1008 X * 1024 1024 1024 0 2048 1024 O * * We could probably detect this based on either the DRB * matching, which was the case for the swizzling required in * the table above, or from the 1-ch value being less than * the minimum size of a rank. */ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; } else { swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; } } dev_priv->mm.bit_6_swizzle_x = swizzle_x; dev_priv->mm.bit_6_swizzle_y = swizzle_y; } /* Check pitch constriants for all chips & tiling formats */ static bool i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) { int tile_width; /* Linear is always fine */ if (tiling_mode == I915_TILING_NONE) return (true); if (IS_GEN2(dev) || (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) tile_width = 128; else tile_width = 512; /* check maximum stride & object size */ if (INTEL_INFO(dev)->gen >= 4) { /* i965 stores the end address of the gtt mapping in the fence * reg, so dont bother to check the size */ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return (false); } else { if (stride > 8192) return (false); if (IS_GEN3(dev)) { if (size > I830_FENCE_MAX_SIZE_VAL << 20) return (false); } else { if (size > I830_FENCE_MAX_SIZE_VAL << 19) return (false); } } /* 965+ just needs multiples of tile width */ if (INTEL_INFO(dev)->gen >= 4) { if (stride & (tile_width - 1)) return (false); return (true); } /* Pre-965 needs power of two tile widths */ if (stride < tile_width) return (false); if (stride & (stride - 1)) return (false); return (true); } /* Is the current GTT allocation valid for the change in tiling? */ static bool i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) { u32 size; if (tiling_mode == I915_TILING_NONE) return (true); if (INTEL_INFO(obj->base.dev)->gen >= 4) return (true); if (INTEL_INFO(obj->base.dev)->gen == 3) { if (obj->gtt_offset & ~I915_FENCE_START_MASK) return (false); } else { if (obj->gtt_offset & ~I830_FENCE_START_MASK) return (false); } /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ if (INTEL_INFO(obj->base.dev)->gen == 3) size = 1024*1024; else size = 512*1024; while (size < obj->base.size) size <<= 1; if (obj->gtt_space->size != size) return (false); if (obj->gtt_offset & (size - 1)) return (false); return (true); } /** * Sets the tiling mode of an object, returning the required swizzling of * bit 6 of addresses in the object. 
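i915_gem_object_fence_ok() above reduces to a small predicate on pre-965 parts: the GTT node must be exactly the smallest power-of-two fence size that covers the object, and the offset must be aligned to that size. A standalone restatement of the check:

#include <stdbool.h>
#include <stdint.h>

static bool
fence_constraints_ok(uint64_t gtt_offset, uint64_t obj_size, uint64_t node_size,
    bool is_gen3)
{
	uint64_t size = is_gen3 ? 1024 * 1024 : 512 * 1024;

	while (size < obj_size)
		size <<= 1;
	if (node_size != size)
		return (false);
	return ((gtt_offset & (size - 1)) == 0);
}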
*/ int i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_set_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; int ret; ret = 0; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) return -ENOENT; if (!i915_tiling_ok(dev, args->stride, obj->base.size, args->tiling_mode)) { drm_gem_object_unreference(&obj->base); return -EINVAL; } if (obj->pin_count) { drm_gem_object_unreference(&obj->base); return -EBUSY; } if (args->tiling_mode == I915_TILING_NONE) { args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; args->stride = 0; } else { if (args->tiling_mode == I915_TILING_X) args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; else args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; /* Hide bit 17 swizzling from the user. This prevents old Mesa * from aborting the application on sw fallbacks to bit 17, * and we use the pread/pwrite bit17 paths to swizzle for it. * If there was a user that was relying on the swizzle * information for drm_intel_bo_map()ed reads/writes this would * break it, but we don't have any of those. */ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; /* If we can't handle the swizzling, make it untiled. */ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { args->tiling_mode = I915_TILING_NONE; args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; args->stride = 0; } } if (args->tiling_mode != obj->tiling_mode || args->stride != obj->stride) { /* We need to rebind the object if its current allocation * no longer meets the alignment restrictions for its new * tiling mode. Otherwise we can just leave it alone, but - * need to ensure that any fence register is cleared. + * need to ensure that any fence register is updated before + * the next fenced (either through the GTT or by the BLT unit + * on older GPUs) access. + * + * After updating the tiling parameters, we then flag whether + * we need to update an associated fence register. Note this + * has to also include the unfenced register the GPU uses + * whilst executing a fenced command for an untiled object. */ - i915_gem_release_mmap(obj); - obj->map_and_fenceable = obj->gtt_space == NULL || + obj->map_and_fenceable = + obj->gtt_space == NULL || (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && i915_gem_object_fence_ok(obj, args->tiling_mode)); /* Rebind if we need a change of alignment */ if (!obj->map_and_fenceable) { uint32_t unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev, obj->base.size, args->tiling_mode); if (obj->gtt_offset & (unfenced_alignment - 1)) ret = i915_gem_object_unbind(obj); } + if (ret == 0) { - obj->tiling_changed = true; + obj->fence_dirty = + obj->fenced_gpu_access || + obj->fence_reg != I915_FENCE_REG_NONE; + + obj->tiling_mode = args->tiling_mode; obj->stride = args->stride; + + /* Force the fence to be reacquired for GTT access */ + i915_gem_release_mmap(obj); } - } + } /* we have to maintain this existing ABI... */ args->stride = obj->stride; args->tiling_mode = obj->tiling_mode; drm_gem_object_unreference(&obj->base); return (ret); } /** * Returns the current tiling mode and required bit 6 swizzling for the object. 
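The bit-17 swizzling hidden from userspace above is handled in the pread/pwrite paths by i915_gem_swizzle_page() below: when a page's physical bit 17 changes, each 64-byte half of every 128-byte pair must swap places. The same transform on an ordinary buffer, as a sketch:

#include <string.h>

#define PG_SIZE 4096

static void
swizzle_page_copy(char page[PG_SIZE])
{
	char tmp[64];
	int i;

	for (i = 0; i < PG_SIZE; i += 128) {
		memcpy(tmp, &page[i], 64);
		memcpy(&page[i], &page[i + 64], 64);
		memcpy(&page[i + 64], tmp, 64);
	}
}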
*/ int i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) return -ENOENT; args->tiling_mode = obj->tiling_mode; switch (obj->tiling_mode) { case I915_TILING_X: args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; break; case I915_TILING_Y: args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; break; case I915_TILING_NONE: args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; break; default: DRM_ERROR("unknown tiling mode\n"); } /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; drm_gem_object_unreference(&obj->base); return 0; } /** * Swap every 64 bytes of this page around, to account for it having a new * bit 17 of its physical address and therefore being interpreted differently * by the GPU. */ static void i915_gem_swizzle_page(vm_page_t m) { char temp[64]; char *vaddr; struct sf_buf *sf; int i; /* XXXKIB sleep */ sf = sf_buf_alloc(m, SFB_DEFAULT); vaddr = (char *)sf_buf_kva(sf); for (i = 0; i < PAGE_SIZE; i += 128) { memcpy(temp, &vaddr[i], 64); memcpy(&vaddr[i], &vaddr[i + 64], 64); memcpy(&vaddr[i + 64], temp, 64); } sf_buf_free(sf); +} + +void +i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj, + vm_page_t m) +{ + char new_bit_17; + + if (obj->bit_17 == NULL) + return; + + new_bit_17 = VM_PAGE_TO_PHYS(m) >> 17; + if ((new_bit_17 & 0x1) != (test_bit(m->pindex, obj->bit_17) != 0)) { + i915_gem_swizzle_page(m); + vm_page_dirty(m); + } } void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) { int page_count = obj->base.size >> PAGE_SHIFT; int i; if (obj->bit_17 == NULL) return; for (i = 0; i < page_count; i++) { char new_bit_17 = VM_PAGE_TO_PHYS(obj->pages[i]) >> 17; if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) { i915_gem_swizzle_page(obj->pages[i]); vm_page_dirty(obj->pages[i]); } } } void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) { int page_count = obj->base.size >> PAGE_SHIFT; int i; if (obj->bit_17 == NULL) { obj->bit_17 = malloc(BITS_TO_LONGS(page_count) * sizeof(long), DRM_I915_GEM, M_WAITOK); } /* XXXKIB: review locking, atomics might be not needed there */ for (i = 0; i < page_count; i++) { if (VM_PAGE_TO_PHYS(obj->pages[i]) & (1 << 17)) set_bit(i, obj->bit_17); else clear_bit(i, obj->bit_17); } } Index: head/sys/dev/drm2/i915/i915_irq.c =================================================================== --- head/sys/dev/drm2/i915/i915_irq.c (revision 277486) +++ head/sys/dev/drm2/i915/i915_irq.c (revision 277487) @@ -1,2262 +1,2661 @@ /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- */ /*- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include +#include static void i915_capture_error_state(struct drm_device *dev); static u32 ring_last_seqno(struct intel_ring_buffer *ring); -/** - * Interrupts that are always left unmasked. - * - * Since pipe events are edge-triggered from the PIPESTAT register to IIR, - * we leave them always unmasked in IMR and then control enabling them through - * PIPESTAT alone. - */ -#define I915_INTERRUPT_ENABLE_FIX \ - (I915_ASLE_INTERRUPT | \ - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ - I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \ - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \ - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - -/** Interrupts that we mask and unmask at runtime. 
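The display interrupt helpers that follow keep a shadow copy of DEIMR and only touch the register when a bit actually changes. A minimal model of that pattern (DEIMR_reg is a stand-in variable, not the real MMIO access):

#include <stdint.h>

static uint32_t irq_mask_shadow = 0xffffffffu;	/* all sources masked */
static volatile uint32_t DEIMR_reg;		/* stand-in for the register */

static void
display_irq_enable(uint32_t mask)
{
	if ((irq_mask_shadow & mask) != 0) {
		irq_mask_shadow &= ~mask;
		DEIMR_reg = irq_mask_shadow;	/* posting read omitted */
	}
}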
*/ -#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT) - -#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ - PIPE_VBLANK_INTERRUPT_STATUS) - -#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\ - PIPE_VBLANK_INTERRUPT_ENABLE) - -#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ - DRM_I915_VBLANK_PIPE_B) - /* For display hotplug interrupt */ static void ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { if ((dev_priv->irq_mask & mask) != 0) { dev_priv->irq_mask &= ~mask; I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } static inline void ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { if ((dev_priv->irq_mask & mask) != mask) { dev_priv->irq_mask |= mask; I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) { if ((dev_priv->pipestat[pipe] & mask) != mask) { u32 reg = PIPESTAT(pipe); dev_priv->pipestat[pipe] |= mask; /* Enable the interrupt, clear any pending status */ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); POSTING_READ(reg); } } void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) { if ((dev_priv->pipestat[pipe] & mask) != 0) { u32 reg = PIPESTAT(pipe); dev_priv->pipestat[pipe] &= ~mask; I915_WRITE(reg, dev_priv->pipestat[pipe]); POSTING_READ(reg); } } /** * intel_enable_asle - enable ASLE interrupt for OpRegion */ void intel_enable_asle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + /* FIXME: opregion/asle for VLV */ + if (IS_VALLEYVIEW(dev)) + return; + mtx_lock(&dev_priv->irq_lock); if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, DE_GSE); else { i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); } mtx_unlock(&dev_priv->irq_lock); } /** * i915_pipe_enabled - check if a pipe is enabled * @dev: DRM device * @pipe: pipe to check * * Reading certain registers when the pipe is disabled can hang the chip. * Use this routine to make sure the PLL is running and the pipe is active * before reading such registers if unsure. */ static int i915_pipe_enabled(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; } /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long high_frame; unsigned long low_frame; u32 high1, high2, low; if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG("trying to get vblank count for disabled " "pipe %c\n", pipe_name(pipe)); return 0; } high_frame = PIPEFRAME(pipe); low_frame = PIPEFRAMEPIXEL(pipe); /* * High & low register fields aren't synchronized, so make sure * we get a low value that's stable across two reads of the high * register. 
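The read loop that follows deals with a split frame counter whose high and low halves are not latched together: the high field is read on both sides of the low read and the whole sequence is retried until the two high reads agree. The same pattern in isolation (the volatile variables stand in for the PIPEFRAME/PIPEFRAMEPIXEL reads):

#include <stdint.h>

static volatile uint32_t hw_frame_high, hw_frame_low;	/* register stand-ins */

static uint32_t
stable_frame_count(void)
{
	uint32_t high1, high2, low;

	do {
		high1 = hw_frame_high;
		low = hw_frame_low;
		high2 = hw_frame_high;
	} while (high1 != high2);
	return ((high1 << 8) | low);	/* low field contributes 8 bits, as in the shift below */
}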
*/ do { high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; } while (high1 != high2); high1 >>= PIPE_FRAME_HIGH_SHIFT; low >>= PIPE_FRAME_LOW_SHIFT; return (high1 << 8) | low; } static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int reg = PIPE_FRMCOUNT_GM45(pipe); if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG("i915: trying to get vblank count for disabled " "pipe %c\n", pipe_name(pipe)); return 0; } return I915_READ(reg); } static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, int *vpos, int *hpos) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 vbl = 0, position = 0; int vbl_start, vbl_end, htotal, vtotal; bool in_vbl = true; int ret = 0; if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG("i915: trying to get scanoutpos for disabled " "pipe %c\n", pipe_name(pipe)); return 0; } /* Get vtotal. */ vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); if (INTEL_INFO(dev)->gen >= 4) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. */ position = I915_READ(PIPEDSL(pipe)); /* Decode into vertical scanout position. Don't have * horizontal scanout position. */ *vpos = position & 0x1fff; *hpos = 0; } else { /* Have access to pixelcount since start of frame. * We can split this into vertical and horizontal * scanout position. */ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); *vpos = position / htotal; *hpos = position - (*vpos * htotal); } /* Query vblank area. */ vbl = I915_READ(VBLANK(pipe)); /* Test position against vblank region. */ vbl_start = vbl & 0x1fff; vbl_end = (vbl >> 16) & 0x1fff; if ((*vpos < vbl_start) || (*vpos > vbl_end)) in_vbl = false; /* Inside "upper part" of vblank area? Apply corrective offset: */ if (in_vbl && (*vpos >= vbl_start)) *vpos = *vpos - vtotal; /* Readouts valid? */ if (vbl > 0) ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; /* In vblank? */ if (in_vbl) ret |= DRM_SCANOUTPOS_INVBL; return ret; } static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error, struct timeval *vblank_time, unsigned flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; if (pipe < 0 || pipe >= dev_priv->num_pipe) { DRM_ERROR("Invalid crtc %d\n", pipe); return -EINVAL; } /* Get drm_crtc to timestamp: */ crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc == NULL) { DRM_ERROR("Invalid crtc %d\n", pipe); return -EINVAL; } if (!crtc->enabled) { #if 0 DRM_DEBUG("crtc %d is disabled\n", pipe); #endif return -EBUSY; } /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, vblank_time, flags, crtc); } /* * Handle hotplug events outside the interrupt handler proper. 
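On pre-gen4 parts, i915_get_crtc_scanoutpos() above recovers the scanline and pixel position from a single running pixel counter by dividing by the total line length. A tiny worked example with hypothetical values:

#include <stdio.h>

int
main(void)
{
	unsigned int position = 123456;	/* hypothetical pixel count this frame */
	unsigned int htotal = 2200;	/* hypothetical pixels per scanline */
	unsigned int vpos = position / htotal;
	unsigned int hpos = position - vpos * htotal;

	printf("scanline %u, pixel %u\n", vpos, hpos);
	return (0);
}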
*/ static void i915_hotplug_work_func(void *context, int pending) { drm_i915_private_t *dev_priv = context; struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config; struct intel_encoder *encoder; DRM_DEBUG("running encoder hotplug functions\n"); dev_priv = context; dev = dev_priv->dev; mode_config = &dev->mode_config; sx_xlock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); list_for_each_entry(encoder, &mode_config->encoder_list, base.head) if (encoder->hot_plug) encoder->hot_plug(encoder); sx_xunlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ #if 0 drm_helper_hpd_irq_event(dev); #endif } static void i915_handle_rps_change(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay = dev_priv->cur_delay; I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); busy_up = I915_READ(RCPREVBSYTUPAVG); busy_down = I915_READ(RCPREVBSYTDNAVG); max_avg = I915_READ(RCBMAXAVG); min_avg = I915_READ(RCBMINAVG); /* Handle RCS change request from hw */ if (busy_up > max_avg) { if (dev_priv->cur_delay != dev_priv->max_delay) new_delay = dev_priv->cur_delay - 1; if (new_delay < dev_priv->max_delay) new_delay = dev_priv->max_delay; } else if (busy_down < min_avg) { if (dev_priv->cur_delay != dev_priv->min_delay) new_delay = dev_priv->cur_delay + 1; if (new_delay > dev_priv->min_delay) new_delay = dev_priv->min_delay; } if (ironlake_set_drps(dev, new_delay)) dev_priv->cur_delay = new_delay; return; } static void notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 seqno; if (ring->obj == NULL) return; - seqno = ring->get_seqno(ring); - CTR2(KTR_DRM, "request_complete %s %d", ring->name, seqno); + CTR2(KTR_DRM, "request_complete %s %d", ring->name, + ring->get_seqno(ring)); - mtx_lock(&ring->irq_lock); - ring->irq_seqno = seqno; + mtx_lock(&dev_priv->irq_lock); wakeup(ring); - mtx_unlock(&ring->irq_lock); + mtx_unlock(&dev_priv->irq_lock); if (i915_enable_hangcheck) { dev_priv->hangcheck_count = 0; callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD); } } static void gen6_pm_rps_work_func(void *arg, int pending) { struct drm_device *dev; drm_i915_private_t *dev_priv; u8 new_delay; u32 pm_iir, pm_imr; dev_priv = (drm_i915_private_t *)arg; dev = dev_priv->dev; new_delay = dev_priv->cur_delay; mtx_lock(&dev_priv->rps_lock); pm_iir = dev_priv->pm_iir; dev_priv->pm_iir = 0; pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE(GEN6_PMIMR, 0); mtx_unlock(&dev_priv->rps_lock); if (!pm_iir) return; DRM_LOCK(dev); if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (dev_priv->cur_delay != dev_priv->max_delay) new_delay = dev_priv->cur_delay + 1; if (new_delay > dev_priv->max_delay) new_delay = dev_priv->max_delay; } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { gen6_gt_force_wake_get(dev_priv); if (dev_priv->cur_delay != dev_priv->min_delay) new_delay = dev_priv->cur_delay - 1; if (new_delay < dev_priv->min_delay) { new_delay = dev_priv->min_delay; I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, I915_READ(GEN6_RP_INTERRUPT_LIMITS) | ((new_delay << 16) & 0x3f0000)); } else { /* Make sure we continue to get down interrupts * until we hit the minimum frequency */ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); } gen6_gt_force_wake_put(dev_priv); } gen6_set_rps(dev, new_delay); dev_priv->cur_delay = new_delay; /* * rps_lock not held here 
because clearing is non-destructive. There is * an *extremely* unlikely race with gen6_rps_enable() that is prevented * by holding struct_mutex for the duration of the write. */ DRM_UNLOCK(dev); } -static void pch_irq_handler(struct drm_device *dev) +static void snb_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) { + + if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | + GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) + notify_ring(dev, &dev_priv->rings[RCS]); + if (gt_iir & GEN6_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[VCS]); + if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[BCS]); + + if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_RENDER_CS_ERROR_INTERRUPT)) { + DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); + i915_handle_error(dev, false); + } +} + +static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, + u32 pm_iir) +{ + + /* + * IIR bits should never already be set because IMR should + * prevent an interrupt from being shown in IIR. The warning + * displays a case where we've unsafely cleared + * dev_priv->pm_iir. Although missing an interrupt of the same + * type is not a problem, it displays a problem in the logic. + * + * The mask bit in IMR is cleared by rps_work. + */ + + mtx_lock(&dev_priv->rps_lock); + if (dev_priv->pm_iir & pm_iir) + printf("Missed a PM interrupt\n"); + dev_priv->pm_iir |= pm_iir; + I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); + POSTING_READ(GEN6_PMIMR); + mtx_unlock(&dev_priv->rps_lock); + + taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task); +} + +static void valleyview_irq_handler(void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 pch_iir; + u32 iir, gt_iir, pm_iir; int pipe; + u32 pipe_stats[I915_MAX_PIPES]; + u32 vblank_status; + int vblank = 0; + bool blc_event; - pch_iir = I915_READ(SDEIIR); + atomic_inc(&dev_priv->irq_received); + vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS | + PIPE_VBLANK_INTERRUPT_STATUS; + + while (true) { + iir = I915_READ(VLV_IIR); + gt_iir = I915_READ(GTIIR); + pm_iir = I915_READ(GEN6_PMIIR); + + if (gt_iir == 0 && pm_iir == 0 && iir == 0) + goto out; + + snb_gt_irq_handler(dev, dev_priv, gt_iir); + + mtx_lock(&dev_priv->irq_lock); + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + } + } + mtx_unlock(&dev_priv->irq_lock); + + /* Consume port. 
Then clear IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_status & dev_priv->hotplug_supported_mask) + taskqueue_enqueue(dev_priv->tq, + &dev_priv->hotplug_task); + + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + I915_READ(PORT_HOTPLUG_STAT); + } + + + if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) { + drm_handle_vblank(dev, 0); + vblank++; + intel_finish_page_flip(dev, 0); + } + + if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) { + drm_handle_vblank(dev, 1); + vblank++; + intel_finish_page_flip(dev, 0); + } + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); + + I915_WRITE(GTIIR, gt_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + I915_WRITE(VLV_IIR, iir); + } + +out:; +} + +static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + if (pch_iir & SDE_AUDIO_POWER_MASK) DRM_DEBUG("i915: PCH audio power change on port %d\n", (pch_iir & SDE_AUDIO_POWER_MASK) >> SDE_AUDIO_POWER_SHIFT); if (pch_iir & SDE_GMBUS) DRM_DEBUG("i915: PCH GMBUS interrupt\n"); if (pch_iir & SDE_AUDIO_HDCP_MASK) DRM_DEBUG("i915: PCH HDCP audio interrupt\n"); if (pch_iir & SDE_AUDIO_TRANS_MASK) DRM_DEBUG("i915: PCH transcoder audio interrupt\n"); if (pch_iir & SDE_POISON) DRM_ERROR("i915: PCH poison interrupt\n"); if (pch_iir & SDE_FDI_MASK) for_each_pipe(pipe) DRM_DEBUG(" pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe))); if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) DRM_DEBUG("i915: PCH transcoder CRC done interrupt\n"); if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) DRM_DEBUG("i915: PCH transcoder CRC error interrupt\n"); if (pch_iir & SDE_TRANSB_FIFO_UNDER) DRM_DEBUG("i915: PCH transcoder B underrun interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) DRM_DEBUG("PCH transcoder A underrun interrupt\n"); } static void ivybridge_irq_handler(void *arg) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; -#if 0 - struct drm_i915_master_private *master_priv; -#endif + u32 de_iir, gt_iir, de_ier, pm_iir; + int i; atomic_inc(&dev_priv->irq_received); /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); POSTING_READ(DEIER); - de_iir = I915_READ(DEIIR); - gt_iir = I915_READ(GTIIR); - pch_iir = I915_READ(SDEIIR); - pm_iir = I915_READ(GEN6_PMIIR); - - CTR4(KTR_DRM, "ivybridge_irq de %x gt %x pch %x pm %x", de_iir, - gt_iir, pch_iir, pm_iir); - - if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0) - goto done; - -#if 0 - if (dev->primary->master) { - master_priv = dev->primary->master->driver_priv; - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); + gt_iir = I915_READ(GTIIR); + if (gt_iir) { + snb_gt_irq_handler(dev, dev_priv, gt_iir); + I915_WRITE(GTIIR, gt_iir); } -#else - if (dev_priv->sarea_priv) - dev_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); -#endif - if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) - notify_ring(dev, &dev_priv->rings[RCS]); - if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->rings[VCS]); - if 
(gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(dev, &dev_priv->rings[BCS]); + de_iir = I915_READ(DEIIR); + if (de_iir) { + if (de_iir & DE_GSE_IVB) + intel_opregion_gse_intr(dev); - if (de_iir & DE_GSE_IVB) { - intel_opregion_gse_intr(dev); - } + for (i = 0; i < 3; i++) { + if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { + intel_prepare_page_flip(dev, i); + intel_finish_page_flip_plane(dev, i); + } + if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) + drm_handle_vblank(dev, i); + } - if (de_iir & DE_PLANEA_FLIP_DONE_IVB) { - intel_prepare_page_flip(dev, 0); - intel_finish_page_flip_plane(dev, 0); - } + /* check event from PCH */ + if (de_iir & DE_PCH_EVENT_IVB) { + u32 pch_iir = I915_READ(SDEIIR); - if (de_iir & DE_PLANEB_FLIP_DONE_IVB) { - intel_prepare_page_flip(dev, 1); - intel_finish_page_flip_plane(dev, 1); - } + if (pch_iir & SDE_HOTPLUG_MASK_CPT) + taskqueue_enqueue(dev_priv->tq, + &dev_priv->hotplug_task); + pch_irq_handler(dev, pch_iir); - if (de_iir & DE_PIPEA_VBLANK_IVB) - drm_handle_vblank(dev, 0); + /* clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } - if (de_iir & DE_PIPEB_VBLANK_IVB) - drm_handle_vblank(dev, 1); - - /* check event from PCH */ - if (de_iir & DE_PCH_EVENT_IVB) { - if (pch_iir & SDE_HOTPLUG_MASK_CPT) - taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task); - pch_irq_handler(dev); + I915_WRITE(DEIIR, de_iir); } - if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { - mtx_lock(&dev_priv->rps_lock); - if ((dev_priv->pm_iir & pm_iir) != 0) - printf("Missed a PM interrupt\n"); - dev_priv->pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); - POSTING_READ(GEN6_PMIMR); - mtx_unlock(&dev_priv->rps_lock); - taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task); + pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) { + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); } - /* should clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, pch_iir); - I915_WRITE(GTIIR, gt_iir); - I915_WRITE(DEIIR, de_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - -done: I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); + + CTR3(KTR_DRM, "ivybridge_irq de %x gt %x pm %x", de_iir, + gt_iir, pm_iir); } +static void ilk_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) +{ + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + notify_ring(dev, &dev_priv->rings[RCS]); + if (gt_iir & GT_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[VCS]); +} + static void ironlake_irq_handler(void *arg) { struct drm_device *dev = arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 hotplug_mask; -#if 0 - struct drm_i915_master_private *master_priv; -#endif - u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; atomic_inc(&dev_priv->irq_received); - if (IS_GEN6(dev)) - bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; - /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); POSTING_READ(DEIER); de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); pch_iir = I915_READ(SDEIIR); pm_iir = I915_READ(GEN6_PMIIR); CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir, gt_iir, pch_iir, pm_iir); if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) goto done; if (HAS_PCH_CPT(dev)) hotplug_mask = SDE_HOTPLUG_MASK_CPT; else hotplug_mask = SDE_HOTPLUG_MASK; -#if 0 - if (dev->primary->master) { - master_priv = 
dev->primary->master->driver_priv; - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); - } -#else - if (dev_priv->sarea_priv) - dev_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); -#endif + if (IS_GEN5(dev)) + ilk_gt_irq_handler(dev, dev_priv, gt_iir); + else + snb_gt_irq_handler(dev, dev_priv, gt_iir); - if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) - notify_ring(dev, &dev_priv->rings[RCS]); - if (gt_iir & bsd_usr_interrupt) - notify_ring(dev, &dev_priv->rings[VCS]); - if (gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(dev, &dev_priv->rings[BCS]); - if (de_iir & DE_GSE) { intel_opregion_gse_intr(dev); } if (de_iir & DE_PLANEA_FLIP_DONE) { intel_prepare_page_flip(dev, 0); intel_finish_page_flip_plane(dev, 0); } if (de_iir & DE_PLANEB_FLIP_DONE) { intel_prepare_page_flip(dev, 1); intel_finish_page_flip_plane(dev, 1); } if (de_iir & DE_PIPEA_VBLANK) drm_handle_vblank(dev, 0); if (de_iir & DE_PIPEB_VBLANK) drm_handle_vblank(dev, 1); /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { if (pch_iir & hotplug_mask) taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task); - pch_irq_handler(dev); + pch_irq_handler(dev, pch_iir); } if (de_iir & DE_PCU_EVENT) { I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); i915_handle_rps_change(dev); } - if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { - mtx_lock(&dev_priv->rps_lock); - if ((dev_priv->pm_iir & pm_iir) != 0) - printf("Missed a PM interrupt\n"); - dev_priv->pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); - POSTING_READ(GEN6_PMIMR); - mtx_unlock(&dev_priv->rps_lock); - taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task); - } + if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); I915_WRITE(GTIIR, gt_iir); I915_WRITE(DEIIR, de_iir); I915_WRITE(GEN6_PMIIR, pm_iir); done: I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); } /** * i915_error_work_func - do process context error handling work * @work: work struct * * Fire an error uevent so userspace can see that a hang or error * was detected. */ static void i915_error_work_func(void *context, int pending) { drm_i915_private_t *dev_priv = context; struct drm_device *dev = dev_priv->dev; /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */ if (atomic_load_acq_int(&dev_priv->mm.wedged)) { DRM_DEBUG("i915: resetting chip\n"); /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */ if (!i915_reset(dev)) { atomic_store_rel_int(&dev_priv->mm.wedged, 0); /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */ } mtx_lock(&dev_priv->error_completion_lock); dev_priv->error_completion++; wakeup(&dev_priv->error_completion); mtx_unlock(&dev_priv->error_completion_lock); } } +#define pr_err(...) 
printf(__VA_ARGS__) + static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 eir = I915_READ(EIR); int pipe; if (!eir) return; printf("i915: render error detected, EIR: 0x%08x\n", eir); if (IS_G4X(dev)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); - printf(" IPEIR: 0x%08x\n", - I915_READ(IPEIR_I965)); - printf(" IPEHR: 0x%08x\n", - I915_READ(IPEHR_I965)); - printf(" INSTDONE: 0x%08x\n", + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); + pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965)); - printf(" INSTPS: 0x%08x\n", - I915_READ(INSTPS)); - printf(" INSTDONE1: 0x%08x\n", - I915_READ(INSTDONE1)); - printf(" ACTHD: 0x%08x\n", - I915_READ(ACTHD_I965)); + pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); } if (eir & GM45_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); - printf("page table error\n"); - printf(" PGTBL_ER: 0x%08x\n", - pgtbl_err); + pr_err("page table error\n"); + pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); POSTING_READ(PGTBL_ER); } } if (!IS_GEN2(dev)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); - printf("page table error\n"); - printf(" PGTBL_ER: 0x%08x\n", - pgtbl_err); + pr_err("page table error\n"); + pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); POSTING_READ(PGTBL_ER); } } if (eir & I915_ERROR_MEMORY_REFRESH) { - printf("memory refresh error:\n"); + pr_err("memory refresh error:\n"); for_each_pipe(pipe) - printf("pipe %c stat: 0x%08x\n", + pr_err("pipe %c stat: 0x%08x\n", pipe_name(pipe), I915_READ(PIPESTAT(pipe))); /* pipestat has already been acked */ } if (eir & I915_ERROR_INSTRUCTION) { - printf("instruction error\n"); - printf(" INSTPM: 0x%08x\n", - I915_READ(INSTPM)); + pr_err("instruction error\n"); + pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); if (INTEL_INFO(dev)->gen < 4) { u32 ipeir = I915_READ(IPEIR); - printf(" IPEIR: 0x%08x\n", - I915_READ(IPEIR)); - printf(" IPEHR: 0x%08x\n", - I915_READ(IPEHR)); - printf(" INSTDONE: 0x%08x\n", - I915_READ(INSTDONE)); - printf(" ACTHD: 0x%08x\n", - I915_READ(ACTHD)); + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); + pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); I915_WRITE(IPEIR, ipeir); POSTING_READ(IPEIR); } else { u32 ipeir = I915_READ(IPEIR_I965); - printf(" IPEIR: 0x%08x\n", - I915_READ(IPEIR_I965)); - printf(" IPEHR: 0x%08x\n", - I915_READ(IPEHR_I965)); - printf(" INSTDONE: 0x%08x\n", + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); + pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965)); - printf(" INSTPS: 0x%08x\n", - I915_READ(INSTPS)); - printf(" INSTDONE1: 0x%08x\n", - I915_READ(INSTDONE1)); - printf(" ACTHD: 0x%08x\n", - I915_READ(ACTHD_I965)); + pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); } } I915_WRITE(EIR, eir); POSTING_READ(EIR); eir = I915_READ(EIR); if (eir) { /* * some errors might have become stuck, * mask them. 
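
The pr_err() shim introduced just above keeps the error-dump code textually close to its Linux counterpart by forwarding straight to printf(). A trivial standalone sketch of the same forwarding macro; the register value is a stand-in, and prepending a driver prefix would be an optional variation rather than what this revision does.

    #include <stdio.h>

    /* Same shim as added in this change: Linux-style pr_err() becomes a
     * plain printf() on FreeBSD. */
    #define pr_err(...)     printf(__VA_ARGS__)

    int main(void)
    {
            unsigned int ipeir = 0x1234abcd;    /* stand-in register value */
            pr_err("  IPEIR: 0x%08x\n", ipeir);
            return 0;
    }
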
*/ DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); I915_WRITE(EMR, I915_READ(EMR) | eir); I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); } } /** * i915_handle_error - handle an error interrupt * @dev: drm device * * Do some basic checking of regsiter state at error interrupt time and * dump it to the syslog. Also call i915_capture_error_state() to make * sure we get a record and make it available in debugfs. Fire a uevent * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ void i915_handle_error(struct drm_device *dev, bool wedged) { struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int i; i915_capture_error_state(dev); i915_report_and_clear_eir(dev); if (wedged) { mtx_lock(&dev_priv->error_completion_lock); dev_priv->error_completion = 0; dev_priv->mm.wedged = 1; /* unlock acts as rel barrier for store to wedged */ mtx_unlock(&dev_priv->error_completion_lock); /* * Wakeup waiting processes so they don't hang */ - mtx_lock(&dev_priv->rings[RCS].irq_lock); - wakeup(&dev_priv->rings[RCS]); - mtx_unlock(&dev_priv->rings[RCS].irq_lock); - if (HAS_BSD(dev)) { - mtx_lock(&dev_priv->rings[VCS].irq_lock); - wakeup(&dev_priv->rings[VCS]); - mtx_unlock(&dev_priv->rings[VCS].irq_lock); + for_each_ring(ring, dev_priv, i) { + mtx_lock(&dev_priv->irq_lock); + wakeup(ring); + mtx_unlock(&dev_priv->irq_lock); } - if (HAS_BLT(dev)) { - mtx_lock(&dev_priv->rings[BCS].irq_lock); - wakeup(&dev_priv->rings[BCS]); - mtx_unlock(&dev_priv->rings[BCS].irq_lock); - } } taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task); } static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_gem_object *obj; struct intel_unpin_work *work; bool stall_detected; /* Ignore early vblank irqs */ if (intel_crtc == NULL) return; mtx_lock(&dev->event_lock); work = intel_crtc->unpin_work; if (work == NULL || work->pending || !work->enable_stall_check) { /* Either the pending flip IRQ arrived, or we're too early. 
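
i915_handle_error() above now marks the GPU wedged and then walks every ring with for_each_ring(), waking any sleepers under the single irq_lock instead of open-coding the RCS/VCS/BCS cases. The condition-variable sketch below mirrors that broadcast-on-error idea in user space; the ring array, names, and locking are invented for illustration only.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct ring {
            const char      *name;
            pthread_mutex_t  lock;
            pthread_cond_t   cv;    /* waiters sleep here, like wakeup(ring) */
    };

    static struct ring rings[3] = {
            { "render",  PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER },
            { "bsd",     PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER },
            { "blitter", PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER },
    };
    static bool wedged;

    /* Analogue of the wedged path: flag the hang, then wake every ring's
     * waiters so nothing stays blocked on a request that will never finish. */
    static void handle_error(void)
    {
            wedged = true;
            printf("GPU marked wedged: %d\n", wedged);
            for (int i = 0; i < 3; i++) {
                    pthread_mutex_lock(&rings[i].lock);
                    pthread_cond_broadcast(&rings[i].cv);
                    pthread_mutex_unlock(&rings[i].lock);
                    printf("woke waiters on %s ring\n", rings[i].name);
            }
    }

    int main(void) { handle_error(); return 0; }
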
Don't check */ mtx_unlock(&dev->event_lock); return; } /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ obj = work->pending_flip_obj; if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = DSPSURF(intel_crtc->plane); - stall_detected = I915_READ(dspsurf) == obj->gtt_offset; + stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == + obj->gtt_offset; } else { int dspaddr = DSPADDR(intel_crtc->plane); stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + crtc->y * crtc->fb->pitches[0] + crtc->x * crtc->fb->bits_per_pixel/8); } mtx_unlock(&dev->event_lock); if (stall_detected) { DRM_DEBUG("Pageflip stall detected\n"); intel_prepare_page_flip(dev, intel_crtc->plane); } } -static void -i915_driver_irq_handler(void *arg) -{ - struct drm_device *dev = (struct drm_device *)arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private; -#if 0 - struct drm_i915_master_private *master_priv; -#endif - u32 iir, new_iir; - u32 pipe_stats[I915_MAX_PIPES]; - u32 vblank_status; - int vblank = 0; - int irq_received; - int pipe; - bool blc_event = false; - - atomic_inc(&dev_priv->irq_received); - - iir = I915_READ(IIR); - - CTR1(KTR_DRM, "driver_irq_handler %x", iir); - - if (INTEL_INFO(dev)->gen >= 4) - vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; - else - vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; - - for (;;) { - irq_received = iir != 0; - - /* Can't rely on pipestat interrupt bit in iir as it might - * have been cleared after the pipestat interrupt was received. - * It doesn't set the bit in iir again, but it still produces - * interrupts (for non-MSI). - */ - mtx_lock(&dev_priv->irq_lock); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false); - - for_each_pipe(pipe) { - int reg = PIPESTAT(pipe); - pipe_stats[pipe] = I915_READ(reg); - - /* - * Clear the PIPE*STAT regs before the IIR - */ - if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG("pipe %c underrun\n", - pipe_name(pipe)); - I915_WRITE(reg, pipe_stats[pipe]); - irq_received = 1; - } - } - mtx_unlock(&dev_priv->irq_lock); - - if (!irq_received) - break; - - /* Consume port. 
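
For pre-gen4 parts the stall check above compares DSPADDR against the address the CPU expects: the object's GTT offset plus the panning offset y * pitch + x * bytes-per-pixel (gen4+ instead compares DSPSURF through the new I915_HI_DISPBASE mask). A small arithmetic sketch of the pre-gen4 comparison, with made-up numbers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Expected scan-out address for a linear framebuffer, as computed by the
     * pre-gen4 branch of i915_pageflip_stall_check(). */
    static uint32_t expected_dspaddr(uint32_t gtt_offset, uint32_t pitch,
        uint32_t bpp, uint32_t x, uint32_t y)
    {
            return gtt_offset + y * pitch + x * bpp / 8;
    }

    int main(void)
    {
            uint32_t dspaddr = 0x00210000;          /* pretend register read */
            uint32_t want = expected_dspaddr(0x00200000, 4096, 32, 0, 16);
            bool stall = (dspaddr == want);         /* flip already landed? */

            printf("expected 0x%08x, read 0x%08x -> %s\n", want, dspaddr,
                stall ? "stall detected" : "still pending");
            return 0;
    }
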
Then clear IIR or we'll miss events */ - if ((I915_HAS_HOTPLUG(dev)) && - (iir & I915_DISPLAY_PORT_INTERRUPT)) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); - - DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n", - hotplug_status); - if (hotplug_status & dev_priv->hotplug_supported_mask) - taskqueue_enqueue(dev_priv->tq, - &dev_priv->hotplug_task); - - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); - I915_READ(PORT_HOTPLUG_STAT); - } - - I915_WRITE(IIR, iir); - new_iir = I915_READ(IIR); /* Flush posted writes */ - -#if 0 - if (dev->primary->master) { - master_priv = dev->primary->master->driver_priv; - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); - } -#else - if (dev_priv->sarea_priv) - dev_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); -#endif - - if (iir & I915_USER_INTERRUPT) - notify_ring(dev, &dev_priv->rings[RCS]); - if (iir & I915_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->rings[VCS]); - - if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { - intel_prepare_page_flip(dev, 0); - if (dev_priv->flip_pending_is_done) - intel_finish_page_flip_plane(dev, 0); - } - - if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { - intel_prepare_page_flip(dev, 1); - if (dev_priv->flip_pending_is_done) - intel_finish_page_flip_plane(dev, 1); - } - - for_each_pipe(pipe) { - if (pipe_stats[pipe] & vblank_status && - drm_handle_vblank(dev, pipe)) { - vblank++; - if (!dev_priv->flip_pending_is_done) { - i915_pageflip_stall_check(dev, pipe); - intel_finish_page_flip(dev, pipe); - } - } - - if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) - blc_event = true; - } - - - if (blc_event || (iir & I915_ASLE_INTERRUPT)) { - intel_opregion_asle_intr(dev); - } - - /* With MSI, interrupts are only generated when iir - * transitions from zero to nonzero. If another bit got - * set while we were handling the existing iir bits, then - * we would never get another interrupt. - * - * This is fine on non-MSI as well, as if we hit this path - * we avoid exiting the interrupt handler only to generate - * another one. - * - * Note that for MSI this could cause a stray interrupt report - * if an interrupt landed in the time between writing IIR and - * the posting read. This should be rare enough to never - * trigger the 99% of 100,000 interrupts test for disabling - * stray interrupts. 
- */ - iir = new_iir; - } -} - -static int i915_emit_irq(struct drm_device * dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; -#if 0 - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; -#endif - - i915_kernel_lost_context(dev); - - DRM_DEBUG("i915: emit_irq\n"); - - dev_priv->counter++; - if (dev_priv->counter > 0x7FFFFFFFUL) - dev_priv->counter = 1; -#if 0 - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_enqueue = dev_priv->counter; -#else - if (dev_priv->sarea_priv) - dev_priv->sarea_priv->last_enqueue = dev_priv->counter; -#endif - - if (BEGIN_LP_RING(4) == 0) { - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); - } - - return dev_priv->counter; -} - -static int i915_wait_irq(struct drm_device * dev, int irq_nr) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -#if 0 - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; -#endif - int ret; - struct intel_ring_buffer *ring = LP_RING(dev_priv); - - DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, - READ_BREADCRUMB(dev_priv)); - -#if 0 - if (READ_BREADCRUMB(dev_priv) >= irq_nr) { - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - return 0; - } - - if (master_priv->sarea_priv) - master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; -#else - if (READ_BREADCRUMB(dev_priv) >= irq_nr) { - if (dev_priv->sarea_priv) { - dev_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); - } - return 0; - } - - if (dev_priv->sarea_priv) - dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; -#endif - - ret = 0; - mtx_lock(&ring->irq_lock); - if (ring->irq_get(ring)) { - DRM_UNLOCK(dev); - while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) { - ret = -msleep(ring, &ring->irq_lock, PCATCH, - "915wtq", 3 * hz); - } - ring->irq_put(ring); - mtx_unlock(&ring->irq_lock); - DRM_LOCK(dev); - } else { - mtx_unlock(&ring->irq_lock); - if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr, - 3000, 1, "915wir")) - ret = -EBUSY; - } - - if (ret == -EBUSY) { - DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", - READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); - } - - return ret; -} - -/* Needs the lock as it touches the ring. - */ -int i915_irq_emit(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_irq_emit_t *emit = data; - int result; - - if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - - RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - - DRM_LOCK(dev); - result = i915_emit_irq(dev); - DRM_UNLOCK(dev); - - if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { - DRM_ERROR("copy_to_user\n"); - return -EFAULT; - } - - return 0; -} - -/* Doesn't need the hardware lock. 
- */ -int i915_irq_wait(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_irq_wait_t *irqwait = data; - - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - - return i915_wait_irq(dev, irqwait->irq_seq); -} - /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ static int i915_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; mtx_lock(&dev_priv->irq_lock); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); /* maintain vblank delivery even in deep C-states */ if (dev_priv->info->gen == 3) - I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); + I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); mtx_unlock(&dev_priv->irq_lock); CTR1(KTR_DRM, "i915_enable_vblank %d", pipe); return 0; } static int ironlake_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; mtx_lock(&dev_priv->irq_lock); ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); mtx_unlock(&dev_priv->irq_lock); CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe); return 0; } static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; mtx_lock(&dev_priv->irq_lock); - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? - DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); + ironlake_enable_display_irq(dev_priv, + DE_PIPEA_VBLANK_IVB << (5 * pipe)); mtx_unlock(&dev_priv->irq_lock); CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe); return 0; } +static int valleyview_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 dpfl, imr; + if (!i915_pipe_enabled(dev, pipe)) + return -EINVAL; + + mtx_lock(&dev_priv->irq_lock); + dpfl = I915_READ(VLV_DPFLIPSTAT); + imr = I915_READ(VLV_IMR); + if (pipe == 0) { + dpfl |= PIPEA_VBLANK_INT_EN; + imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; + } else { + dpfl |= PIPEA_VBLANK_INT_EN; + imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + } + I915_WRITE(VLV_DPFLIPSTAT, dpfl); + I915_WRITE(VLV_IMR, imr); + mtx_unlock(&dev_priv->irq_lock); + + return 0; +} + /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ static void i915_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; mtx_lock(&dev_priv->irq_lock); if (dev_priv->info->gen == 3) - I915_WRITE(INSTPM, - INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); + I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE | PIPE_START_VBLANK_INTERRUPT_ENABLE); mtx_unlock(&dev_priv->irq_lock); CTR1(KTR_DRM, "i915_disable_vblank %d", pipe); } static void ironlake_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; mtx_lock(&dev_priv->irq_lock); ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); mtx_unlock(&dev_priv->irq_lock); CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe); } static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; mtx_lock(&dev_priv->irq_lock); - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? - DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); + ironlake_disable_display_irq(dev_priv, + DE_PIPEA_VBLANK_IVB << (pipe * 5)); mtx_unlock(&dev_priv->irq_lock); CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe); } -/* Set the vblank monitor pipe - */ -int i915_vblank_pipe_set(struct drm_device *dev, void *data, - struct drm_file *file_priv) +static void valleyview_disable_vblank(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 dpfl, imr; - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; + mtx_lock(&dev_priv->irq_lock); + dpfl = I915_READ(VLV_DPFLIPSTAT); + imr = I915_READ(VLV_IMR); + if (pipe == 0) { + dpfl &= ~PIPEA_VBLANK_INT_EN; + imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; + } else { + dpfl &= ~PIPEB_VBLANK_INT_EN; + imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; } - - return 0; + I915_WRITE(VLV_IMR, imr); + I915_WRITE(VLV_DPFLIPSTAT, dpfl); + mtx_unlock(&dev_priv->irq_lock); } -int i915_vblank_pipe_get(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_vblank_pipe_t *pipe = data; - - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - - pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; - - return 0; -} - -/** - * Schedule buffer swap at given vertical blank. - */ -int i915_vblank_swap(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - /* The delayed swap mechanism was fundamentally racy, and has been - * removed. The model was that the client requested a delayed flip/swap - * from the kernel, then waited for vblank before continuing to perform - * rendering. The problem was that the kernel might wake the client - * up before it dispatched the vblank swap (since the lock has to be - * held while touching the ringbuffer), in which case the client would - * clear and start the next frame before the swap occurred, and - * flicker would occur in addition to likely missing the vblank. - * - * In the absence of this ioctl, userland falls back to a correct path - * of waiting for a vblank, then dispatching the swap on its own. - * Context switching to userland and back is plenty fast enough for - * meeting the requirements of vblank swapping. - */ - return -EINVAL; -} - static u32 ring_last_seqno(struct intel_ring_buffer *ring) { if (list_empty(&ring->request_list)) return (0); else return (list_entry(ring->request_list.prev, struct drm_i915_gem_request, list)->seqno); } static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) { if (list_empty(&ring->request_list) || i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { /* Issue a wake-up to catch stuck h/w. */ - if (ring->waiting_seqno) { - DRM_ERROR( -"Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", - ring->name, - ring->waiting_seqno, - ring->get_seqno(ring)); + sleepq_lock(ring); + if (sleepq_sleepcnt(ring, 0) != 0) { + sleepq_release(ring); + DRM_ERROR("Hangcheck timer elapsed... 
%s idle\n", + ring->name); wakeup(ring); *err = true; - } + } else + sleepq_release(ring); return true; } return false; } static bool kick_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) { DRM_ERROR("Kicking stuck wait on %s\n", ring->name); I915_WRITE_CTL(ring, tmp); return true; } return false; } +static bool i915_hangcheck_hung(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (dev_priv->hangcheck_count++ > 1) { + bool hung = true; + + DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); + i915_handle_error(dev, true); + + if (!IS_GEN2(dev)) { + struct intel_ring_buffer *ring; + int i; + + /* Is the chip hanging on a WAIT_FOR_EVENT? + * If so we can simply poke the RB_WAIT bit + * and break the hang. This should work on + * all but the second generation chipsets. + */ + for_each_ring(ring, dev_priv, i) + hung &= !kick_ring(ring); + } + + return hung; + } + + return false; +} + /** * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. The first time this is called we simply record * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses * again, we assume the chip is wedged and try to fix it. */ void i915_hangcheck_elapsed(void *context) { struct drm_device *dev = (struct drm_device *)context; drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt; - bool err = false; + uint32_t acthd[I915_NUM_RINGS], instdone, instdone1; + struct intel_ring_buffer *ring; + bool err = false, idle; + int i; if (!i915_enable_hangcheck) return; + memset(acthd, 0, sizeof(acthd)); + idle = true; + for_each_ring(ring, dev_priv, i) { + idle &= i915_hangcheck_ring_idle(ring, &err); + acthd[i] = intel_ring_get_active_head(ring); + } + /* If all work is done then ACTHD clearly hasn't advanced. */ - if (i915_hangcheck_ring_idle(&dev_priv->rings[RCS], &err) && - i915_hangcheck_ring_idle(&dev_priv->rings[VCS], &err) && - i915_hangcheck_ring_idle(&dev_priv->rings[BCS], &err)) { - dev_priv->hangcheck_count = 0; - if (err) + if (idle) { + if (err) { + if (i915_hangcheck_hung(dev)) + return; + goto repeat; + } + + dev_priv->hangcheck_count = 0; return; } if (INTEL_INFO(dev)->gen < 4) { instdone = I915_READ(INSTDONE); instdone1 = 0; } else { instdone = I915_READ(INSTDONE_I965); instdone1 = I915_READ(INSTDONE1); } - acthd = intel_ring_get_active_head(&dev_priv->rings[RCS]); - acthd_bsd = HAS_BSD(dev) ? - intel_ring_get_active_head(&dev_priv->rings[VCS]) : 0; - acthd_blt = HAS_BLT(dev) ? - intel_ring_get_active_head(&dev_priv->rings[BCS]) : 0; - - if (dev_priv->last_acthd == acthd && - dev_priv->last_acthd_bsd == acthd_bsd && - dev_priv->last_acthd_blt == acthd_blt && + if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && dev_priv->last_instdone == instdone && dev_priv->last_instdone1 == instdone1) { - if (dev_priv->hangcheck_count++ > 1) { - DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); - i915_handle_error(dev, true); - - if (!IS_GEN2(dev)) { - /* Is the chip hanging on a WAIT_FOR_EVENT? - * If so we can simply poke the RB_WAIT bit - * and break the hang. This should work on - * all but the second generation chipsets. 
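
The new i915_hangcheck_hung() only escalates to a full reset if kicking the rings does not help: it runs kick_ring() on every ring and clears the hung verdict as soon as one of them was merely stuck on a kickable WAIT_FOR_EVENT. A compact sketch of that voting loop; the ring state and the way the "kick" is simulated here are stand-ins, since the real code just writes the control register value back.

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_WAIT 0x800         /* stand-in for the ring's wait bit */

    struct ring { const char *name; unsigned ctl; };

    /* Analogue of kick_ring(): if the ring sits in WAIT_FOR_EVENT, poke it
     * and report that the apparent hang may clear on its own. */
    static bool kick_ring(struct ring *r)
    {
            if (r->ctl & RING_WAIT) {
                    printf("kicking stuck wait on %s\n", r->name);
                    r->ctl &= ~RING_WAIT;   /* simulate the nudge */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            struct ring rings[] = {
                    { "render", RING_WAIT }, { "bsd", 0 }, { "blitter", 0 },
            };
            bool hung = true;

            /* Same shape as i915_hangcheck_hung(): any successful kick
             * downgrades the hang verdict. */
            for (int i = 0; i < 3; i++)
                    hung &= !kick_ring(&rings[i]);

            printf(hung ? "GPU hung, resetting\n" : "kicked, re-checking later\n");
            return 0;
    }
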
- */ - if (kick_ring(&dev_priv->rings[RCS])) - goto repeat; - - if (HAS_BSD(dev) && - kick_ring(&dev_priv->rings[VCS])) - goto repeat; - - if (HAS_BLT(dev) && - kick_ring(&dev_priv->rings[BCS])) - goto repeat; - } - + if (i915_hangcheck_hung(dev)) return; - } } else { dev_priv->hangcheck_count = 0; - dev_priv->last_acthd = acthd; - dev_priv->last_acthd_bsd = acthd_bsd; - dev_priv->last_acthd_blt = acthd_blt; + memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); dev_priv->last_instdone = instdone; dev_priv->last_instdone1 = instdone1; } repeat: /* Reset timer case chip hangs without another request being added */ callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD); } /* drm_dma.h hooks */ static void ironlake_irq_preinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; atomic_set(&dev_priv->irq_received, 0); - TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func, - dev->dev_private); - TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func, - dev->dev_private); - TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func, - dev->dev_private); - I915_WRITE(HWSTAM, 0xeffe); /* XXX hotplug from PCH */ I915_WRITE(DEIMR, 0xffffffff); I915_WRITE(DEIER, 0x0); POSTING_READ(DEIER); /* and GT */ I915_WRITE(GTIMR, 0xffffffff); I915_WRITE(GTIER, 0x0); POSTING_READ(GTIER); /* south display irq */ I915_WRITE(SDEIMR, 0xffffffff); I915_WRITE(SDEIER, 0x0); POSTING_READ(SDEIER); } +static void valleyview_irq_preinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + /* VLV magic */ + I915_WRITE(VLV_IMR, 0); + I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); + I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); + I915_WRITE(RING_IMR(BLT_RING_BASE), 0); + + /* and GT */ + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + POSTING_READ(GTIER); + + I915_WRITE(DPINVGTT, 0xff); + + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IMR, 0xffffffff); + I915_WRITE(VLV_IER, 0x0); + POSTING_READ(VLV_IER); +} + /* * Enable digital hotplug on the PCH, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec) * * This register is the same on all known PCH chips. 
*/ static void ironlake_enable_pch_hotplug(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 hotplug; hotplug = I915_READ(PCH_PORT_HOTPLUG); hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } static int ironlake_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable kind of interrupts always enabled */ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; u32 render_irqs; u32 hotplug_mask; - dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; dev_priv->irq_mask = ~display_mask; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(DEIMR, dev_priv->irq_mask); I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); POSTING_READ(DEIER); dev_priv->gt_irq_mask = ~0; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); if (IS_GEN6(dev)) render_irqs = GT_USER_INTERRUPT | - GT_GEN6_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; + GEN6_BSD_USER_INTERRUPT | + GEN6_BLITTER_USER_INTERRUPT; else render_irqs = GT_USER_INTERRUPT | GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); if (HAS_PCH_CPT(dev)) { hotplug_mask = (SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); } else { hotplug_mask = (SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG | SDE_AUX_MASK); } dev_priv->pch_irq_mask = ~hotplug_mask; I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); I915_WRITE(SDEIER, hotplug_mask); POSTING_READ(SDEIER); ironlake_enable_pch_hotplug(dev); if (IS_IRONLAKE_M(dev)) { /* Clear & enable PCU event interrupts */ I915_WRITE(DEIIR, DE_PCU_EVENT); I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); } return 0; } static int ivybridge_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | - DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB | - DE_PLANEB_FLIP_DONE_IVB; + u32 display_mask = + DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | + DE_PLANEC_FLIP_DONE_IVB | + DE_PLANEB_FLIP_DONE_IVB | + DE_PLANEA_FLIP_DONE_IVB; u32 render_irqs; u32 hotplug_mask; - dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; dev_priv->irq_mask = ~display_mask; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(DEIMR, dev_priv->irq_mask); - I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB | - DE_PIPEB_VBLANK_IVB); + I915_WRITE(DEIER, + display_mask | + DE_PIPEC_VBLANK_IVB | + DE_PIPEB_VBLANK_IVB | + DE_PIPEA_VBLANK_IVB); POSTING_READ(DEIER); dev_priv->gt_irq_mask = ~0; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; + render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | + GEN6_BLITTER_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); hotplug_mask = 
(SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); dev_priv->pch_irq_mask = ~hotplug_mask; I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); I915_WRITE(SDEIER, hotplug_mask); POSTING_READ(SDEIER); ironlake_enable_pch_hotplug(dev); return 0; } +static int valleyview_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 render_irqs; + u32 enable_mask; + u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); + u16 msid; + + enable_mask = I915_DISPLAY_PORT_INTERRUPT; + enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | + I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + + dev_priv->irq_mask = ~enable_mask; + + dev_priv->pipestat[0] = 0; + dev_priv->pipestat[1] = 0; + + /* Hack for broken MSIs on VLV */ + pci_write_config(dev->device, 0x94, 0xfee00000, 4); + msid = pci_read_config(dev->device, 0x98, 2); + msid &= 0xff; /* mask out delivery bits */ + msid |= (1<<14); + pci_write_config(dev->device, 0x98, msid, 2); + + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + I915_WRITE(VLV_IER, enable_mask); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(PIPESTAT(0), 0xffff); + I915_WRITE(PIPESTAT(1), 0xffff); + POSTING_READ(VLV_IER); + + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IIR, 0xffffffff); + + render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | + GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BLT_USER_INTERRUPT | + GT_GEN6_BSD_USER_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_GEN7_L3_PARITY_ERROR_INTERRUPT | + GT_PIPE_NOTIFY | + GT_RENDER_CS_ERROR_INTERRUPT | + GT_SYNC_STATUS | + GT_USER_INTERRUPT; + + dev_priv->gt_irq_mask = ~render_irqs; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, 0); + I915_WRITE(GTIER, render_irqs); + POSTING_READ(GTIER); + + /* ack & enable invalid PTE error interrupts */ +#if 0 /* FIXME: add support to irq handler for checking these bits */ + I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); + I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); +#endif + + I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); +#if 0 /* FIXME: check register definitions; some have moved */ + /* Note HDMI and DP share bits */ + if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) + hotplug_en |= HDMID_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { + hotplug_en |= CRT_HOTPLUG_INT_EN; + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + } +#endif + + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); + + return 0; +} + +static void valleyview_irq_uninstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + if (!dev_priv) + return; + + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + + I915_WRITE(HWSTAM, 0xffffffff); + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IMR, 0xffffffff); + I915_WRITE(VLV_IER, 0x0); + POSTING_READ(VLV_IER); +} + static 
void -i915_driver_irq_preinstall(struct drm_device * dev) +ironlake_irq_uninstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + if (dev_priv == NULL) + return; + + I915_WRITE(HWSTAM, 0xffffffff); + + I915_WRITE(DEIMR, 0xffffffff); + I915_WRITE(DEIER, 0x0); + I915_WRITE(DEIIR, I915_READ(DEIIR)); + + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + I915_WRITE(GTIIR, I915_READ(GTIIR)); + + I915_WRITE(SDEIMR, 0xffffffff); + I915_WRITE(SDEIER, 0x0); + I915_WRITE(SDEIIR, I915_READ(SDEIIR)); +} + +static void i8xx_irq_preinstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE16(IMR, 0xffff); + I915_WRITE16(IER, 0x0); + POSTING_READ16(IER); +} +static int i8xx_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + dev_priv->pipestat[0] = 0; + dev_priv->pipestat[1] = 0; + + I915_WRITE16(EMR, + ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); + + /* Unmask the interrupts that we always want on. */ + dev_priv->irq_mask = + ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_WRITE16(IMR, dev_priv->irq_mask); + + I915_WRITE16(IER, + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | + I915_USER_INTERRUPT); + POSTING_READ16(IER); + + return 0; +} + +static void i8xx_irq_handler(void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u16 iir, new_iir; + u32 pipe_stats[2]; + int irq_received; + int pipe; + u16 flip_mask = + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + + atomic_inc(&dev_priv->irq_received); + + iir = I915_READ16(IIR); + if (iir == 0) + return; + + while (iir & ~flip_mask) { + /* Can't rely on pipestat interrupt bit in iir as it might + * have been cleared after the pipestat interrupt was received. + * It doesn't set the bit in iir again, but it still produces + * interrupts (for non-MSI). 
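
The new i8xx_irq_handler() deliberately leaves the two plane-flip-pending bits out of its IIR acknowledge (iir & ~flip_mask) and only drops a bit from flip_mask once the matching vblank has completed that flip. The bitmask sketch below shows the effect of that selective ack; the bit values are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define PLANE_A_FLIP_PENDING  (1u << 0)     /* illustrative bit layout */
    #define PLANE_B_FLIP_PENDING  (1u << 1)
    #define USER_INTERRUPT        (1u << 2)

    int main(void)
    {
            uint16_t flip_mask = PLANE_A_FLIP_PENDING | PLANE_B_FLIP_PENDING;
            uint16_t iir = PLANE_A_FLIP_PENDING | USER_INTERRUPT;

            /* Ack everything except the flip-pending bits, as the handler
             * does; the flip stays visible until its vblank completes it. */
            uint16_t ack = iir & ~flip_mask;
            printf("ack 0x%04x, still pending 0x%04x\n",
                (unsigned)ack, (unsigned)(iir & flip_mask));

            /* Once the vblank for plane A lands, the handler finishes the
             * flip and stops holding that bit back. */
            flip_mask &= ~PLANE_A_FLIP_PENDING;
            printf("new flip_mask 0x%04x\n", (unsigned)flip_mask);
            return 0;
    }
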
+ */ + mtx_lock(&dev_priv->irq_lock); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + i915_handle_error(dev, false); + + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + irq_received = 1; + } + } + mtx_unlock(&dev_priv->irq_lock); + + I915_WRITE16(IIR, iir & ~flip_mask); + new_iir = I915_READ16(IIR); /* Flush posted writes */ + + i915_update_dri1_breadcrumb(dev); + + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[RCS]); + + if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && + drm_handle_vblank(dev, 0)) { + if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { + intel_prepare_page_flip(dev, 0); + intel_finish_page_flip(dev, 0); + flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; + } + } + + if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && + drm_handle_vblank(dev, 1)) { + if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { + intel_prepare_page_flip(dev, 1); + intel_finish_page_flip(dev, 1); + flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + } + } + + iir = new_iir; + } +} + +static void i8xx_irq_uninstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + for_each_pipe(pipe) { + /* Clear enable bits; then clear status bits */ + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); + } + I915_WRITE16(IMR, 0xffff); + I915_WRITE16(IER, 0x0); + I915_WRITE16(IIR, I915_READ16(IIR)); +} + +static void i915_irq_preinstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + atomic_set(&dev_priv->irq_received, 0); - TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func, - dev->dev_private); - TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func, - dev->dev_private); - TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func, - dev->dev_private); + if (I915_HAS_HOTPLUG(dev)) { + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + } + I915_WRITE16(HWSTAM, 0xeffe); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE(IMR, 0xffffffff); + I915_WRITE(IER, 0x0); + POSTING_READ(IER); +} + +static int i915_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 enable_mask; + + dev_priv->pipestat[0] = 0; + dev_priv->pipestat[1] = 0; + + I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); + + /* Unmask the interrupts that we always want on. */ + dev_priv->irq_mask = + ~(I915_ASLE_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + + enable_mask = + I915_ASLE_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | + I915_USER_INTERRUPT; + if (I915_HAS_HOTPLUG(dev)) { + /* Enable in IER... 
*/ + enable_mask |= I915_DISPLAY_PORT_INTERRUPT; + /* and unmask in IMR */ + dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; + } + + I915_WRITE(IMR, dev_priv->irq_mask); + I915_WRITE(IER, enable_mask); + POSTING_READ(IER); + + if (I915_HAS_HOTPLUG(dev)) { + u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); + + if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) + hotplug_en |= HDMID_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { + hotplug_en |= CRT_HOTPLUG_INT_EN; + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + } + + /* Ignore TV since it's buggy */ + + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); + } + + intel_opregion_enable_asle(dev); + + return 0; +} + +static void i915_irq_handler(void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; + u32 flip_mask = + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + u32 flip[2] = { + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT + }; + int pipe; + + atomic_inc(&dev_priv->irq_received); + + iir = I915_READ(IIR); + do { + bool irq_received = (iir & ~flip_mask) != 0; + bool blc_event = false; + + /* Can't rely on pipestat interrupt bit in iir as it might + * have been cleared after the pipestat interrupt was received. + * It doesn't set the bit in iir again, but it still produces + * interrupts (for non-MSI). + */ + mtx_lock(&dev_priv->irq_lock); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + i915_handle_error(dev, false); + + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* Clear the PIPE*STAT regs before the IIR */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + irq_received = true; + } + } + mtx_unlock(&dev_priv->irq_lock); + + if (!irq_received) + break; + + /* Consume port. 
Then clear IIR or we'll miss events */ + if ((I915_HAS_HOTPLUG(dev)) && + (iir & I915_DISPLAY_PORT_INTERRUPT)) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_status & dev_priv->hotplug_supported_mask) + taskqueue_enqueue(dev_priv->tq, + &dev_priv->hotplug_task); + + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + POSTING_READ(PORT_HOTPLUG_STAT); + } + + I915_WRITE(IIR, iir & ~flip_mask); + new_iir = I915_READ(IIR); /* Flush posted writes */ + + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[RCS]); + + for_each_pipe(pipe) { + int plane = pipe; + if (IS_MOBILE(dev)) + plane = !plane; + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && + drm_handle_vblank(dev, pipe)) { + if (iir & flip[plane]) { + intel_prepare_page_flip(dev, plane); + intel_finish_page_flip(dev, pipe); + flip_mask &= ~flip[plane]; + } + } + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + } + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) + intel_opregion_asle_intr(dev); + + + /* With MSI, interrupts are only generated when iir + * transitions from zero to nonzero. If another bit got + * set while we were handling the existing iir bits, then + * we would never get another interrupt. + * + * This is fine on non-MSI as well, as if we hit this path + * we avoid exiting the interrupt handler only to generate + * another one. + * + * Note that for MSI this could cause a stray interrupt report + * if an interrupt landed in the time between writing IIR and + * the posting read. This should be rare enough to never + * trigger the 99% of 100,000 interrupts test for disabling + * stray interrupts. + */ + iir = new_iir; + } while (iir & ~flip_mask); + + i915_update_dri1_breadcrumb(dev); +} + +static void i915_irq_uninstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + if (!dev_priv) + return; + + if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } + I915_WRITE16(HWSTAM, 0xffff); + for_each_pipe(pipe) { + /* Clear enable bits; then clear status bits */ + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); + } + I915_WRITE(IMR, 0xffffffff); + I915_WRITE(IER, 0x0); + + I915_WRITE(IIR, I915_READ(IIR)); +} + +static void i965_irq_preinstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + if (I915_HAS_HOTPLUG(dev)) { + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + } + I915_WRITE(HWSTAM, 0xeffe); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); POSTING_READ(IER); } -/* - * Must be called after intel_modeset_init or hotplug interrupts won't be - * enabled correctly. - */ -static int -i915_driver_irq_postinstall(struct drm_device *dev) +static int i965_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; + u32 enable_mask; u32 error_mask; - dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; - /* Unmask the interrupts that we always want on. 
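
Throughout these postinstall routines the enable mask and dev_priv->irq_mask are kept complementary: a source is ORed into the IER value to enable it and ANDed out of the IMR value to unmask it, which is exactly how the optional hotplug interrupt is handled. A bit-arithmetic sketch of that relationship; the bit names are invented and the register behaviour is simplified for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define PIPE_A_EVENT   (1u << 0)
    #define PIPE_B_EVENT   (1u << 1)
    #define USER_IRQ       (1u << 2)
    #define HOTPLUG_IRQ    (1u << 3)

    int main(void)
    {
            uint32_t enable_mask = PIPE_A_EVENT | PIPE_B_EVENT | USER_IRQ;
            uint32_t irq_mask    = ~enable_mask;    /* "IMR": 1 = masked */

            /* Optional source: enable in "IER" and unmask in "IMR". */
            enable_mask |= HOTPLUG_IRQ;
            irq_mask    &= ~HOTPLUG_IRQ;

            uint32_t raw = USER_IRQ | HOTPLUG_IRQ;  /* what the hw latched */
            uint32_t delivered = raw & enable_mask & ~irq_mask;
            printf("delivered 0x%08x\n", delivered);
            return 0;
    }
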
*/ - dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; + dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + enable_mask = ~dev_priv->irq_mask; + enable_mask |= I915_USER_INTERRUPT; + + if (IS_G4X(dev)) + enable_mask |= I915_BSD_USER_INTERRUPT; + dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; if (I915_HAS_HOTPLUG(dev)) { /* Enable in IER... */ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; } /* * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. */ if (IS_G4X(dev)) { error_mask = ~(GM45_ERROR_PAGE_TABLE | GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV | I915_ERROR_MEMORY_REFRESH); } else { error_mask = ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH); } I915_WRITE(EMR, error_mask); I915_WRITE(IMR, dev_priv->irq_mask); I915_WRITE(IER, enable_mask); POSTING_READ(IER); if (I915_HAS_HOTPLUG(dev)) { u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); /* Note HDMI and DP share bits */ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) hotplug_en |= HDMIB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) hotplug_en |= SDVOC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) hotplug_en |= SDVOB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; /* Programming the CRT detection parameters tends to generate a spurious hotplug event about three seconds later. So just do it once. */ if (IS_G4X(dev)) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; } /* Ignore TV since it's buggy */ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); } intel_opregion_enable_asle(dev); return 0; } -static void -ironlake_irq_uninstall(struct drm_device *dev) +static void i965_irq_handler(void *arg) { + struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 iir, new_iir; + u32 pipe_stats[I915_MAX_PIPES]; + int irq_received; + int pipe; - if (dev_priv == NULL) - return; + atomic_inc(&dev_priv->irq_received); - dev_priv->vblank_pipe = 0; + iir = I915_READ(IIR); - I915_WRITE(HWSTAM, 0xffffffff); + for (;;) { + bool blc_event = false; - I915_WRITE(DEIMR, 0xffffffff); - I915_WRITE(DEIER, 0x0); - I915_WRITE(DEIIR, I915_READ(DEIIR)); + irq_received = iir != 0; - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - I915_WRITE(GTIIR, I915_READ(GTIIR)); + /* Can't rely on pipestat interrupt bit in iir as it might + * have been cleared after the pipestat interrupt was received. + * It doesn't set the bit in iir again, but it still produces + * interrupts (for non-MSI). 
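
Each handler reads PIPESTAT(pipe) whether or not IIR still shows the pipe event, and writes the value it read straight back to clear what latched before it touches IIR. A small sketch of that write-back acknowledge; the bit layout noted in the comment is an assumption about the hardware, not something this patch states.

    #include <stdint.h>
    #include <stdio.h>

    /* 0x8000ffff in the handlers selects the latched status bits of a
     * PIPESTAT register (bit 31 plus the low 16 bits); bits 30:16 are
     * assumed here to be the matching enable bits. */
    #define PIPESTAT_STATUS_BITS 0x8000ffffu

    int main(void)
    {
            /* Made-up value: one enable bit set plus its latched status. */
            uint32_t pipestat = 0x00040004;

            if (pipestat & PIPESTAT_STATUS_BITS) {
                    /* Writing the value back acks the latched status at its
                     * source (write-one-to-clear) while leaving the enable
                     * bits alone; the handlers do this before acking IIR. */
                    uint32_t after_ack = pipestat & ~PIPESTAT_STATUS_BITS;
                    printf("PIPESTAT 0x%08x -> 0x%08x after ack\n",
                        pipestat, after_ack);
            }
            return 0;
    }
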
+ */ + mtx_lock(&dev_priv->irq_lock); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + i915_handle_error(dev, false); - I915_WRITE(SDEIMR, 0xffffffff); - I915_WRITE(SDEIER, 0x0); - I915_WRITE(SDEIIR, I915_READ(SDEIIR)); + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); - taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task); - taskqueue_drain(dev_priv->tq, &dev_priv->error_task); - taskqueue_drain(dev_priv->tq, &dev_priv->rps_task); + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + irq_received = 1; + } + } + mtx_unlock(&dev_priv->irq_lock); + + if (!irq_received) + break; + + /* Consume port. Then clear IIR or we'll miss events */ + if ((I915_HAS_HOTPLUG(dev)) && + (iir & I915_DISPLAY_PORT_INTERRUPT)) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_status & dev_priv->hotplug_supported_mask) + taskqueue_enqueue(dev_priv->tq, + &dev_priv->hotplug_task); + + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + I915_READ(PORT_HOTPLUG_STAT); + } + + I915_WRITE(IIR, iir); + new_iir = I915_READ(IIR); /* Flush posted writes */ + + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[RCS]); + if (iir & I915_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->rings[VCS]); + + if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) + intel_prepare_page_flip(dev, 0); + + if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) + intel_prepare_page_flip(dev, 1); + + for_each_pipe(pipe) { + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && + drm_handle_vblank(dev, pipe)) { + i915_pageflip_stall_check(dev, pipe); + intel_finish_page_flip(dev, pipe); + } + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + } + + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) + intel_opregion_asle_intr(dev); + + /* With MSI, interrupts are only generated when iir + * transitions from zero to nonzero. If another bit got + * set while we were handling the existing iir bits, then + * we would never get another interrupt. + * + * This is fine on non-MSI as well, as if we hit this path + * we avoid exiting the interrupt handler only to generate + * another one. + * + * Note that for MSI this could cause a stray interrupt report + * if an interrupt landed in the time between writing IIR and + * the posting read. This should be rare enough to never + * trigger the 99% of 100,000 interrupts test for disabling + * stray interrupts. 
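
The comment above is why every handler re-reads IIR immediately after writing it and loops on new_iir: with MSI, a message is only generated when IIR transitions from zero to nonzero, so bits that latch while the previous batch is being acked must be picked up by software. A small simulation of that edge behaviour; the fake device below is purely illustrative.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_iir;   /* pretend hardware status register */

    /* A new MSI is only generated on a 0 -> nonzero transition of IIR. */
    static void device_raises(uint32_t bit)
    {
            uint32_t old = fake_iir;
            fake_iir |= bit;
            if (old == 0)
                    printf("MSI fired for 0x%08x\n", bit);
    }

    int main(void)
    {
            device_raises(0x1);                 /* first MSI starts the handler */
            uint32_t iir = fake_iir;
            while (iir) {
                    device_raises(0x2);         /* lands while we are handling:
                                                 * IIR never drops to 0, so no
                                                 * new MSI announces it */
                    fake_iir &= ~iir;           /* ack only what we captured */
                    uint32_t new_iir = fake_iir;    /* re-read / flush */
                    printf("handled 0x%08x, IIR now 0x%08x\n", iir, new_iir);
                    iir = new_iir;              /* the loop picks up stragglers */
            }
            return 0;
    }
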
+ */ + iir = new_iir; + } + + i915_update_dri1_breadcrumb(dev); } -static void i915_driver_irq_uninstall(struct drm_device * dev) +static void i965_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; - if (!dev_priv) - return; - - dev_priv->vblank_pipe = 0; - if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } I915_WRITE(HWSTAM, 0xffffffff); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)) & 0x8000ffff); I915_WRITE(IIR, I915_READ(IIR)); - - taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task); - taskqueue_drain(dev_priv->tq, &dev_priv->error_task); - taskqueue_drain(dev_priv->tq, &dev_priv->rps_task); } -void -intel_irq_init(struct drm_device *dev) +void intel_irq_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; + TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func, + dev->dev_private); + TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func, + dev->dev_private); + TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func, + dev->dev_private); + dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { + if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } - if (drm_core_check_feature(dev, DRIVER_MODESET)) dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; else dev->driver->get_vblank_timestamp = NULL; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; - if (IS_IVYBRIDGE(dev)) { + if (IS_VALLEYVIEW(dev)) { + dev->driver->irq_handler = valleyview_irq_handler; + dev->driver->irq_preinstall = valleyview_irq_preinstall; + dev->driver->irq_postinstall = valleyview_irq_postinstall; + dev->driver->irq_uninstall = valleyview_irq_uninstall; + dev->driver->enable_vblank = valleyview_enable_vblank; + dev->driver->disable_vblank = valleyview_disable_vblank; + } else if (IS_IVYBRIDGE(dev)) { /* Share pre & uninstall handlers with ILK/SNB */ dev->driver->irq_handler = ivybridge_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_postinstall = ivybridge_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->enable_vblank = ivybridge_enable_vblank; dev->driver->disable_vblank = ivybridge_disable_vblank; + } else if (IS_HASWELL(dev)) { + /* Share interrupts handling with IVB */ + dev->driver->irq_handler = ivybridge_irq_handler; + dev->driver->irq_preinstall = ironlake_irq_preinstall; + dev->driver->irq_postinstall = ivybridge_irq_postinstall; + dev->driver->irq_uninstall = ironlake_irq_uninstall; + dev->driver->enable_vblank = ivybridge_enable_vblank; + dev->driver->disable_vblank = ivybridge_disable_vblank; } else if (HAS_PCH_SPLIT(dev)) { dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_postinstall = ironlake_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->enable_vblank = ironlake_enable_vblank; dev->driver->disable_vblank = ironlake_disable_vblank; } else { - dev->driver->irq_preinstall = i915_driver_irq_preinstall; - dev->driver->irq_postinstall 
= i915_driver_irq_postinstall; - dev->driver->irq_uninstall = i915_driver_irq_uninstall; - dev->driver->irq_handler = i915_driver_irq_handler; + if (INTEL_INFO(dev)->gen == 2) { + dev->driver->irq_preinstall = i8xx_irq_preinstall; + dev->driver->irq_postinstall = i8xx_irq_postinstall; + dev->driver->irq_handler = i8xx_irq_handler; + dev->driver->irq_uninstall = i8xx_irq_uninstall; + } else if (INTEL_INFO(dev)->gen == 3) { + /* IIR "flip pending" means done if this bit is set */ + I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); + + dev->driver->irq_preinstall = i915_irq_preinstall; + dev->driver->irq_postinstall = i915_irq_postinstall; + dev->driver->irq_uninstall = i915_irq_uninstall; + dev->driver->irq_handler = i915_irq_handler; + } else { + dev->driver->irq_preinstall = i965_irq_preinstall; + dev->driver->irq_postinstall = i965_irq_postinstall; + dev->driver->irq_uninstall = i965_irq_uninstall; + dev->driver->irq_handler = i965_irq_handler; + } dev->driver->enable_vblank = i915_enable_vblank; dev->driver->disable_vblank = i915_disable_vblank; } } static struct drm_i915_error_object * i915_error_object_create(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *src) { struct drm_i915_error_object *dst; struct sf_buf *sf; void *d, *s; int page, page_count; u32 reloc_offset; if (src == NULL || src->pages == NULL) return NULL; page_count = src->base.size / PAGE_SIZE; dst = malloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM, M_NOWAIT); if (dst == NULL) return (NULL); reloc_offset = src->gtt_offset; for (page = 0; page < page_count; page++) { d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT); if (d == NULL) goto unwind; - if (reloc_offset < dev_priv->mm.gtt_mappable_end) { + if (reloc_offset < dev_priv->mm.gtt_mappable_end && + src->has_global_gtt_mapping) { /* Simply ignore tiling or any overlapping fence. * It's part of the error state, and this hopefully * captures what the GPU read. 
*/ s = pmap_mapdev_attr(src->base.dev->agp->base + reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING); memcpy(d, s, PAGE_SIZE); pmap_unmapdev((vm_offset_t)s, PAGE_SIZE); } else { drm_clflush_pages(&src->pages[page], 1); sched_pin(); sf = sf_buf_alloc(src->pages[page], SFB_CPUPRIVATE | SFB_NOWAIT); if (sf != NULL) { s = (void *)(uintptr_t)sf_buf_kva(sf); memcpy(d, s, PAGE_SIZE); sf_buf_free(sf); } else { bzero(d, PAGE_SIZE); strcpy(d, "XXXKIB"); } sched_unpin(); drm_clflush_pages(&src->pages[page], 1); } dst->pages[page] = d; reloc_offset += PAGE_SIZE; } dst->page_count = page_count; dst->gtt_offset = src->gtt_offset; return (dst); unwind: while (page--) free(dst->pages[page], DRM_I915_GEM); free(dst, DRM_I915_GEM); return (NULL); } static void i915_error_object_free(struct drm_i915_error_object *obj) { int page; if (obj == NULL) return; for (page = 0; page < obj->page_count; page++) free(obj->pages[page], DRM_I915_GEM); free(obj, DRM_I915_GEM); } -static void -i915_error_state_free(struct drm_device *dev, - struct drm_i915_error_state *error) +void +i915_error_state_free(struct drm_i915_error_state *error) { int i; for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) { i915_error_object_free(error->ring[i].batchbuffer); i915_error_object_free(error->ring[i].ringbuffer); free(error->ring[i].requests, DRM_I915_GEM); } free(error->active_bo, DRM_I915_GEM); free(error->overlay, DRM_I915_GEM); free(error, DRM_I915_GEM); } -static u32 -capture_bo_list(struct drm_i915_error_buffer *err, int count, - struct list_head *head) +static void capture_bo(struct drm_i915_error_buffer *err, + struct drm_i915_gem_object *obj) { + err->size = obj->base.size; + err->name = obj->base.name; + err->seqno = obj->last_rendering_seqno; + err->gtt_offset = obj->gtt_offset; + err->read_domains = obj->base.read_domains; + err->write_domain = obj->base.write_domain; + err->fence_reg = obj->fence_reg; + err->pinned = 0; + if (obj->pin_count > 0) + err->pinned = 1; + if (obj->user_pin_count > 0) + err->pinned = -1; + err->tiling = obj->tiling_mode; + err->dirty = obj->dirty; + err->purgeable = obj->madv != I915_MADV_WILLNEED; + err->ring = obj->ring ? obj->ring->id : -1; + err->cache_level = obj->cache_level; +} + +static u32 capture_active_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ struct drm_i915_gem_object *obj; int i = 0; list_for_each_entry(obj, head, mm_list) { - err->size = obj->base.size; - err->name = obj->base.name; - err->seqno = obj->last_rendering_seqno; - err->gtt_offset = obj->gtt_offset; - err->read_domains = obj->base.read_domains; - err->write_domain = obj->base.write_domain; - err->fence_reg = obj->fence_reg; - err->pinned = 0; - if (obj->pin_count > 0) - err->pinned = 1; - if (obj->user_pin_count > 0) - err->pinned = -1; - err->tiling = obj->tiling_mode; - err->dirty = obj->dirty; - err->purgeable = obj->madv != I915_MADV_WILLNEED; - err->ring = obj->ring ? 
obj->ring->id : -1; - err->cache_level = obj->cache_level; - + capture_bo(err++, obj); if (++i == count) break; + } - err++; + return i; +} + +static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, gtt_list) { + if (obj->pin_count == 0) + continue; + + capture_bo(err++, obj); + if (++i == count) + break; } - return (i); + return i; } static void i915_gem_record_fences(struct drm_device *dev, struct drm_i915_error_state *error) { struct drm_i915_private *dev_priv = dev->dev_private; int i; /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: case 6: for (i = 0; i < 16; i++) error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); break; case 5: case 4: for (i = 0; i < 16; i++) error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); break; case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); case 2: for (i = 0; i < 8; i++) error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); break; } } static struct drm_i915_error_object * i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, struct intel_ring_buffer *ring) { struct drm_i915_gem_object *obj; u32 seqno; if (!ring->get_seqno) return (NULL); seqno = ring->get_seqno(ring); list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { if (obj->ring != ring) continue; if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) continue; if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) continue; /* We need to copy these to an anonymous buffer as the simplest * method to avoid being overwritten by userspace. */ return (i915_error_object_create(dev_priv, obj)); } return NULL; } static void i915_record_ring_state(struct drm_device *dev, struct drm_i915_error_state *error, struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 6) { - error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); error->semaphore_mboxes[ring->id][0] = I915_READ(RING_SYNC_0(ring->mmio_base)); error->semaphore_mboxes[ring->id][1] = I915_READ(RING_SYNC_1(ring->mmio_base)); } if (INTEL_INFO(dev)->gen >= 4) { + error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); if (ring->id == RCS) { error->instdone1 = I915_READ(INSTDONE1); error->bbaddr = I915_READ64(BB_ADDR); } } else { + error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); error->ipeir[ring->id] = I915_READ(IPEIR); error->ipehr[ring->id] = I915_READ(IPEHR); error->instdone[ring->id] = I915_READ(INSTDONE); } + sleepq_lock(ring); + error->waiting[ring->id] = sleepq_sleepcnt(ring, 0) != 0; + sleepq_release(ring); error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); error->seqno[ring->id] = ring->get_seqno(ring); error->acthd[ring->id] = intel_ring_get_active_head(ring); error->head[ring->id] = I915_READ_HEAD(ring); error->tail[ring->id] = I915_READ_TAIL(ring); error->cpu_ring_head[ring->id] = ring->head; error->cpu_ring_tail[ring->id] = ring->tail; } static void i915_gem_record_rings(struct drm_device *dev, struct drm_i915_error_state *error) { struct 
drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; struct drm_i915_gem_request *request; int i, count; - for (i = 0; i < I915_NUM_RINGS; i++) { - struct intel_ring_buffer *ring = &dev_priv->rings[i]; - - if (ring->obj == NULL) - continue; - + for_each_ring(ring, dev_priv, i) { i915_record_ring_state(dev, error, ring); error->ring[i].batchbuffer = i915_error_first_batchbuffer(dev_priv, ring); error->ring[i].ringbuffer = i915_error_object_create(dev_priv, ring->obj); count = 0; list_for_each_entry(request, &ring->request_list, list) count++; error->ring[i].num_requests = count; error->ring[i].requests = malloc(count * sizeof(struct drm_i915_error_request), DRM_I915_GEM, M_WAITOK); if (error->ring[i].requests == NULL) { error->ring[i].num_requests = 0; continue; } count = 0; list_for_each_entry(request, &ring->request_list, list) { struct drm_i915_error_request *erq; erq = &error->ring[i].requests[count++]; erq->seqno = request->seqno; erq->jiffies = request->emitted_jiffies; erq->tail = request->tail; } } } static void i915_capture_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct drm_i915_error_state *error; int i, pipe; mtx_lock(&dev_priv->error_lock); error = dev_priv->first_error; mtx_unlock(&dev_priv->error_lock); if (error != NULL) return; /* Account for pipe specific data like PIPE*STAT */ error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO); if (error == NULL) { DRM_DEBUG("out of memory, not capturing error state\n"); return; } DRM_INFO("capturing error event; look for more information in " "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx); + refcount_init(&error->ref, 1); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); + + if (HAS_PCH_SPLIT(dev)) + error->ier = I915_READ(DEIER) | I915_READ(GTIER); + else if (IS_VALLEYVIEW(dev)) + error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); + else if (IS_GEN2(dev)) + error->ier = I915_READ16(IER); + else + error->ier = I915_READ(IER); + for_each_pipe(pipe) error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); if (INTEL_INFO(dev)->gen >= 6) { error->error = I915_READ(ERROR_GEN6); error->done_reg = I915_READ(DONE_REG); } i915_gem_record_fences(dev, error); i915_gem_record_rings(dev, error); /* Record buffers on the active and pinned lists. 
*/ error->active_bo = NULL; error->pinned_bo = NULL; i = 0; list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) i++; error->active_bo_count = i; - list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) - i++; + list_for_each_entry(obj, &dev_priv->mm.gtt_list, mm_list) + if (obj->pin_count) + i++; error->pinned_bo_count = i - error->active_bo_count; error->active_bo = NULL; error->pinned_bo = NULL; if (i) { error->active_bo = malloc(sizeof(*error->active_bo) * i, DRM_I915_GEM, M_NOWAIT); if (error->active_bo) error->pinned_bo = error->active_bo + error->active_bo_count; } if (error->active_bo) - error->active_bo_count = capture_bo_list(error->active_bo, - error->active_bo_count, &dev_priv->mm.active_list); + error->active_bo_count = + capture_active_bo(error->active_bo, + error->active_bo_count, + &dev_priv->mm.active_list); if (error->pinned_bo) - error->pinned_bo_count = capture_bo_list(error->pinned_bo, - error->pinned_bo_count, &dev_priv->mm.pinned_list); + error->pinned_bo_count = + capture_pinned_bo(error->pinned_bo, + error->pinned_bo_count, + &dev_priv->mm.gtt_list); microtime(&error->time); error->overlay = intel_overlay_capture_error_state(dev); error->display = intel_display_capture_error_state(dev); mtx_lock(&dev_priv->error_lock); if (dev_priv->first_error == NULL) { dev_priv->first_error = error; error = NULL; } mtx_unlock(&dev_priv->error_lock); if (error != NULL) - i915_error_state_free(dev, error); + i915_error_state_free(error); } void i915_destroy_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; mtx_lock(&dev_priv->error_lock); error = dev_priv->first_error; dev_priv->first_error = NULL; mtx_unlock(&dev_priv->error_lock); - if (error != NULL) - i915_error_state_free(dev, error); + if (error != NULL && refcount_release(&error->ref)) + i915_error_state_free(error); } Index: head/sys/dev/drm2/i915/i915_reg.h =================================================================== --- head/sys/dev/drm2/i915/i915_reg.h (revision 277486) +++ head/sys/dev/drm2/i915/i915_reg.h (revision 277487) @@ -1,3906 +1,4344 @@ /* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #ifndef _I915_REG_H_ #define _I915_REG_H_ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) + +#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) +#define _MASKED_BIT_DISABLE(a) ((a) << 16) + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. * This is all handled in the intel-gtt.ko module. i915.ko only * cares about the vga bit for the vga rbiter. */ #define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_VGA_DISABLE (1 << 1) /* PCI config space */ #define HPLLCC 0xc0 /* 855 only */ #define GC_CLOCK_CONTROL_MASK (0xf << 0) #define GC_CLOCK_133_200 (0 << 0) #define GC_CLOCK_100_200 (1 << 0) #define GC_CLOCK_100_133 (2 << 0) #define GC_CLOCK_166_250 (3 << 0) #define GCFGC2 0xda #define GCFGC 0xf0 /* 915+ only */ #define GC_LOW_FREQUENCY_ENABLE (1 << 7) #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) #define GC_DISPLAY_CLOCK_MASK (7 << 4) #define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) #define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) #define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) #define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) #define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) #define I965_GC_RENDER_CLOCK_MASK (0xf << 0) #define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) #define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) #define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) #define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) #define I945_GC_RENDER_CLOCK_MASK (7 << 0) #define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) #define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) #define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) #define I915_GC_RENDER_CLOCK_MASK (7 << 0) #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) #define LBB 0xf4 /* Graphics reset regs */ #define I965_GDRST 0xc0 /* PCI config register */ #define ILK_GDSR 0x2ca4 /* MCHBAR offset */ #define GRDOM_FULL (0<<2) #define GRDOM_RENDER (1<<2) #define GRDOM_MEDIA (3<<2) #define GRDOM_RESET_ENABLE (1<<0) #define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ #define GEN6_MBC_SNPCR_SHIFT 21 #define GEN6_MBC_SNPCR_MASK (3<<21) #define GEN6_MBC_SNPCR_MAX (0<<21) #define GEN6_MBC_SNPCR_MED (1<<21) #define GEN6_MBC_SNPCR_LOW (2<<21) #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ #define GEN6_MBCTL 0x0907c #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) #define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) #define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) #define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) #define GEN6_GDRST 0x941c #define GEN6_GRDOM_FULL (1 << 0) #define GEN6_GRDOM_RENDER (1 << 1) #define GEN6_GRDOM_MEDIA (1 << 2) #define GEN6_GRDOM_BLT (1 << 3) /* PPGTT stuff */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_PDE_VALID (1 << 0) #define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */ /* gen6+ has bit 11-4 for physical addr bit 39-32 */ #define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) #define GEN6_PTE_VALID (1 << 0) #define GEN6_PTE_UNCACHED (1 << 1) #define GEN6_PTE_CACHE_LLC (2 << 1) #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) #define GEN6_PTE_CACHE_BITS (3 << 1) #define GEN6_PTE_GFDT (1 << 3) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) #define RING_PP_DIR_BASE_READ(ring) 
((ring)->mmio_base+0x518) #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) #define PP_DIR_DCLV_2G 0xffffffff #define GAM_ECOCHK 0x4090 #define ECOCHK_SNB_BIT (1<<10) #define ECOCHK_PPGTT_CACHE64B (0x3<<3) #define ECOCHK_PPGTT_CACHE4B (0x0<<3) +#define GAC_ECO_BITS 0x14090 +#define ECOBITS_PPGTT_CACHE64B (3<<8) +#define ECOBITS_PPGTT_CACHE4B (0<<8) + +#define GAB_CTL 0x24000 +#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) + /* VGA stuff */ #define VGA_ST01_MDA 0x3ba #define VGA_ST01_CGA 0x3da #define VGA_MSR_WRITE 0x3c2 #define VGA_MSR_READ 0x3cc #define VGA_MSR_MEM_EN (1<<1) #define VGA_MSR_CGA_MODE (1<<0) #define VGA_SR_INDEX 0x3c4 #define VGA_SR_DATA 0x3c5 #define VGA_AR_INDEX 0x3c0 #define VGA_AR_VID_EN (1<<5) #define VGA_AR_DATA_WRITE 0x3c0 #define VGA_AR_DATA_READ 0x3c1 #define VGA_GR_INDEX 0x3ce #define VGA_GR_DATA 0x3cf /* GR05 */ #define VGA_GR_MEM_READ_MODE_SHIFT 3 #define VGA_GR_MEM_READ_MODE_PLANE 1 /* GR06 */ #define VGA_GR_MEM_MODE_MASK 0xc #define VGA_GR_MEM_MODE_SHIFT 2 #define VGA_GR_MEM_A0000_AFFFF 0 #define VGA_GR_MEM_A0000_BFFFF 1 #define VGA_GR_MEM_B0000_B7FFF 2 #define VGA_GR_MEM_B0000_BFFFF 3 #define VGA_DACMASK 0x3c6 #define VGA_DACRX 0x3c7 #define VGA_DACWX 0x3c8 #define VGA_DACDATA 0x3c9 #define VGA_CR_INDEX_MDA 0x3b4 #define VGA_CR_DATA_MDA 0x3b5 #define VGA_CR_INDEX_CGA 0x3d4 #define VGA_CR_DATA_CGA 0x3d5 /* * Memory interface instructions used by the kernel */ #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) #define MI_NOOP MI_INSTR(0, 0) #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) #define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) #define MI_FLUSH MI_INSTR(0x04, 0) #define MI_READ_FLUSH (1 << 0) #define MI_EXE_FLUSH (1 << 1) #define MI_NO_WRITE_FLUSH (1 << 2) #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) #define MI_SUSPEND_FLUSH_EN (1<<0) #define MI_REPORT_HEAD MI_INSTR(0x07, 0) #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) #define MI_OVERLAY_CONTINUE (0x0<<21) #define MI_OVERLAY_ON (0x1<<21) #define MI_OVERLAY_OFF (0x2<<21) #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) #define MI_ARB_ON_OFF MI_INSTR(0x08, 0) #define MI_ARB_ENABLE (1<<0) #define MI_ARB_DISABLE (0<<0) #define MI_SET_CONTEXT MI_INSTR(0x18, 0) #define MI_MM_SPACE_GTT (1<<8) #define MI_MM_SPACE_PHYSICAL (0<<8) #define MI_SAVE_EXT_STATE_EN (1<<3) #define MI_RESTORE_EXT_STATE_EN (1<<2) #define MI_FORCE_RESTORE (1<<1) #define MI_RESTORE_INHIBIT (1<<0) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX_SHIFT 2 /* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw * simply ignores the register load under certain conditions. * - One can actually load arbitrary many arbitrary registers: Simply issue x * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! 
*/ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ #define MI_INVALIDATE_TLB (1<<18) #define MI_INVALIDATE_BSD (1<<7) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) +#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) #define MI_SEMAPHORE_UPDATE (1<<21) #define MI_SEMAPHORE_COMPARE (1<<20) #define MI_SEMAPHORE_REGISTER (1<<18) #define MI_SEMAPHORE_SYNC_RV (2<<16) #define MI_SEMAPHORE_SYNC_RB (0<<16) #define MI_SEMAPHORE_SYNC_VR (0<<16) #define MI_SEMAPHORE_SYNC_VB (2<<16) #define MI_SEMAPHORE_SYNC_BR (2<<16) #define MI_SEMAPHORE_SYNC_BV (0<<16) #define MI_SEMAPHORE_SYNC_INVALID (1<<0) /* * 3D instructions used by the kernel */ #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) #define SC_ENABLE_MASK (0x1<<0) #define SC_ENABLE (0x1<<0) #define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) #define SCI_YMIN_MASK (0xffff<<16) #define SCI_XMIN_MASK (0xffff<<0) #define SCI_YMAX_MASK (0xffff<<16) #define SCI_XMAX_MASK (0xffff<<0) #define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) #define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) #define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) #define BLT_DEPTH_8 (0<<24) #define BLT_DEPTH_16_565 (1<<24) #define BLT_DEPTH_16_1555 (2<<24) #define BLT_DEPTH_32 (3<<24) #define BLT_ROP_GXCOPY (0xcc<<16) #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) #define ASYNC_FLIP (1<<22) #define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_B (1<<20) #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) #define PIPE_CONTROL_CS_STALL (1<<20) #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) #define PIPE_CONTROL_WRITE_FLUSH (1<<12) #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) #define PIPE_CONTROL_NOTIFY (1<<8) #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) #define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ /* * Reset registers */ #define 
DEBUG_RESET_I830 0x6070 #define DEBUG_RESET_FULL (1<<7) #define DEBUG_RESET_RENDER (1<<8) #define DEBUG_RESET_DISPLAY (1<<9) +/* + * DPIO - a special bus for various display related registers to hide behind: + * 0x800c: m1, m2, n, p1, p2, k dividers + * 0x8014: REF and SFR select + * 0x8014: N divider, VCO select + * 0x801c/3c: core clock bits + * 0x8048/68: low pass filter coefficients + * 0x8100: fast clock controls + */ +#define DPIO_PKT 0x2100 +#define DPIO_RID (0<<24) +#define DPIO_OP_WRITE (1<<16) +#define DPIO_OP_READ (0<<16) +#define DPIO_PORTID (0x12<<8) +#define DPIO_BYTE (0xf<<4) +#define DPIO_BUSY (1<<0) /* status only */ +#define DPIO_DATA 0x2104 +#define DPIO_REG 0x2108 +#define DPIO_CTL 0x2110 +#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ +#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ +#define DPIO_SFR_BYPASS (1<<1) +#define DPIO_RESET (1<<0) +#define _DPIO_DIV_A 0x800c +#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */ +#define DPIO_K_SHIFT (24) /* 4 bits */ +#define DPIO_P1_SHIFT (21) /* 3 bits */ +#define DPIO_P2_SHIFT (16) /* 5 bits */ +#define DPIO_N_SHIFT (12) /* 4 bits */ +#define DPIO_ENABLE_CALIBRATION (1<<11) +#define DPIO_M1DIV_SHIFT (8) /* 3 bits */ +#define DPIO_M2DIV_MASK 0xff +#define _DPIO_DIV_B 0x802c +#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B) + +#define _DPIO_REFSFR_A 0x8014 +#define DPIO_REFSEL_OVERRIDE 27 +#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ +#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ +#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ +#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ +#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ +#define _DPIO_REFSFR_B 0x8034 +#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B) + +#define _DPIO_CORE_CLK_A 0x801c +#define _DPIO_CORE_CLK_B 0x803c +#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B) + +#define _DPIO_LFP_COEFF_A 0x8048 +#define _DPIO_LFP_COEFF_B 0x8068 +#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B) + +#define DPIO_FASTCLK_DISABLE 0x8100 + /* * Fence registers */ #define FENCE_REG_830_0 0x2000 #define FENCE_REG_945_8 0x3000 #define I830_FENCE_START_MASK 0x07f80000 #define I830_FENCE_TILING_Y_SHIFT 12 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 #define I830_FENCE_REG_VALID (1<<0) #define I915_FENCE_MAX_PITCH_VAL 4 #define I830_FENCE_MAX_PITCH_VAL 6 #define I830_FENCE_MAX_SIZE_VAL (1<<8) #define I915_FENCE_START_MASK 0x0ff00000 #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) #define FENCE_REG_965_0 0x03000 #define I965_FENCE_PITCH_SHIFT 2 #define I965_FENCE_TILING_Y_SHIFT 1 #define I965_FENCE_REG_VALID (1<<0) #define I965_FENCE_MAX_PITCH_VAL 0x0400 #define FENCE_REG_SANDYBRIDGE_0 0x100000 #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 /* control register for cpu gtt access */ #define TILECTL 0x101000 #define TILECTL_SWZCTL (1 << 0) #define TILECTL_TLB_PREFETCH_DIS (1 << 2) #define TILECTL_BACKSNOOP_DIS (1 << 3) /* * Instruction and interrupt control regs */ #define PGTBL_ER 0x02024 #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 #define BLT_RING_BASE 0x22000 #define RING_TAIL(base) ((base)+0x30) #define RING_HEAD(base) ((base)+0x34) #define RING_START(base) ((base)+0x38) #define RING_CTL(base) ((base)+0x3c) #define RING_SYNC_0(base) ((base)+0x40) #define RING_SYNC_1(base) ((base)+0x44) #define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) 
#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) #define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) #define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) #define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) #define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) #define RING_MAX_IDLE(base) ((base)+0x54) #define RING_HWS_PGA(base) ((base)+0x80) #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) #define ARB_MODE 0x04030 #define ARB_MODE_SWIZZLE_SNB (1<<4) #define ARB_MODE_SWIZZLE_IVB (1<<5) -#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x) -#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x) #define RENDER_HWS_PGA_GEN7 (0x04080) #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) #define DONE_REG 0x40b0 #define BSD_HWS_PGA_GEN7 (0x04180) #define BLT_HWS_PGA_GEN7 (0x04280) #define RING_ACTHD(base) ((base)+0x74) #define RING_NOPID(base) ((base)+0x94) #define RING_IMR(base) ((base)+0xa8) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC #define RING_NR_PAGES 0x001FF000 #define RING_REPORT_MASK 0x00000006 #define RING_REPORT_64K 0x00000002 #define RING_REPORT_128K 0x00000004 #define RING_NO_REPORT 0x00000000 #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ #if 0 #define PRB0_TAIL 0x02030 #define PRB0_HEAD 0x02034 #define PRB0_START 0x02038 #define PRB0_CTL 0x0203c #define PRB1_TAIL 0x02040 /* 915+ only */ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ #define PRB1_CTL 0x0204c /* 915+ only */ #endif #define IPEIR_I965 0x02064 #define IPEHR_I965 0x02068 #define INSTDONE_I965 0x0206c #define RING_IPEIR(base) ((base)+0x64) #define RING_IPEHR(base) ((base)+0x68) #define RING_INSTDONE(base) ((base)+0x6c) #define RING_INSTPS(base) ((base)+0x70) #define RING_DMA_FADD(base) ((base)+0x78) #define RING_INSTPM(base) ((base)+0xc0) #define INSTPS 0x02070 /* 965+ only */ #define INSTDONE1 0x0207c /* 965+ only */ #define ACTHD_I965 0x02074 #define HWS_PGA 0x02080 #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA 0x2088 /* 965GM+ only */ #define PWRCTX_EN (1<<0) #define IPEIR 0x02088 #define IPEHR 0x0208c #define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 +#define DMA_FADD_I8XX 0x020d0 #define ERROR_GEN6 0x040a0 /* GM45+ chicken bits -- debug workaround bits that may be required * for various sorts of correct behavior. The top 16 bits of each are * the enables for writing to the corresponding low bit. */ #define _3D_CHICKEN 0x02084 #define _3D_CHICKEN2 0x0208c /* Disables pipelining of read flushes past the SF-WIZ interface. * Required on all Ironlake steppings according to the B-Spec, but the * particular danger of not doing so is not specified. 
*/ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 0x02090 +#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 12) #define GFX_MODE 0x02520 #define GFX_MODE_GEN7 0x0229c #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) #define GFX_RUN_LIST_ENABLE (1<<15) #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) #define GFX_SURFACE_FAULT_ENABLE (1<<12) #define GFX_REPLAY_MODE (1<<11) #define GFX_PSMI_GRANULARITY (1<<10) #define GFX_PPGTT_ENABLE (1<<9) -#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit)) -#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0)) - #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 #define IMR 0x020a8 #define ISR 0x020ac +#define VLV_IIR_RW 0x182084 +#define VLV_IER 0x1820a0 +#define VLV_IIR 0x1820a4 +#define VLV_IMR 0x1820a8 +#define VLV_ISR 0x1820ac #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) #define I915_DISPLAY_PORT_INTERRUPT (1<<17) #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ #define I915_HWB_OOM_INTERRUPT (1<<13) #define I915_SYNC_STATUS_INTERRUPT (1<<12) #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) #define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) #define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) #define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) #define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) #define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) #define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) #define I915_DEBUG_INTERRUPT (1<<2) #define I915_USER_INTERRUPT (1<<1) #define I915_ASLE_INTERRUPT (1<<0) #define I915_BSD_USER_INTERRUPT (1<<25) #define EIR 0x020b0 #define EMR 0x020b4 #define ESR 0x020b8 #define GM45_ERROR_PAGE_TABLE (1<<5) #define GM45_ERROR_MEM_PRIV (1<<4) #define I915_ERROR_PAGE_TABLE (1<<4) #define GM45_ERROR_CP_PRIV (1<<3) #define I915_ERROR_MEMORY_REFRESH (1<<1) #define I915_ERROR_INSTRUCTION (1<<0) #define INSTPM 0x020c0 #define INSTPM_SELF_EN (1<<12) /* 915GM only */ #define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts will not assert AGPBUSY# and will only be delivered when out of C3. */ #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ #define ACTHD 0x020c8 #define FW_BLC 0x020d8 #define FW_BLC2 0x020dc #define FW_BLC_SELF 0x020e0 /* 915+ only */ #define FW_BLC_SELF_EN_MASK (1<<31) #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ #define FW_BLC_SELF_EN (1<<15) /* 945 only */ #define MM_BURST_LENGTH 0x00700000 #define MM_FIFO_WATERMARK 0x0001F000 #define LM_BURST_LENGTH 0x00000700 #define LM_FIFO_WATERMARK 0x0000001F #define MI_ARB_STATE 0x020e4 /* 915+ only */ -#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */ /* Make render/texture TLB fetches lower priorty than associated data * fetches. This is not turned on by default */ #define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) /* Isoch request wait on GTT enable (Display A/B/C streams). * Make isoch requests stall on the TLB update. May cause * display underruns (test mode only) */ #define MI_ARB_ISOCH_WAIT_GTT (1 << 14) /* Block grant count for isoch requests when block count is * set to a finite value. 
*/ #define MI_ARB_BLOCK_GRANT_MASK (3 << 12) #define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ #define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ #define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ #define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ /* Enable render writes to complete in C2/C3/C4 power states. * If this isn't enabled, render writes are prevented in low * power states. That seems bad to me. */ #define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) /* This acknowledges an async flip immediately instead * of waiting for 2TLB fetches. */ #define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) /* Enables non-sequential data reads through arbiter */ #define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) /* Disable FSB snooping of cacheable write cycles from binner/render * command stream */ #define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) /* Arbiter time slice for non-isoch streams */ #define MI_ARB_TIME_SLICE_MASK (7 << 5) #define MI_ARB_TIME_SLICE_1 (0 << 5) #define MI_ARB_TIME_SLICE_2 (1 << 5) #define MI_ARB_TIME_SLICE_4 (2 << 5) #define MI_ARB_TIME_SLICE_6 (3 << 5) #define MI_ARB_TIME_SLICE_8 (4 << 5) #define MI_ARB_TIME_SLICE_10 (5 << 5) #define MI_ARB_TIME_SLICE_14 (6 << 5) #define MI_ARB_TIME_SLICE_16 (7 << 5) /* Low priority grace period page size */ #define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ #define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) /* Disable display A/B trickle feed */ #define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* Set display plane priority */ #define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ #define CACHE_MODE_0 0x02120 /* 915+ only */ -#define CM0_MASK_SHIFT 16 #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) #define CM0_DEPTH_EVICT_DISABLE (1<<4) #define CM0_COLOR_EVICT_DISABLE (1<<3) #define CM0_DEPTH_WRITE_DISABLE (1<<1) #define CM0_RC_OP_FLUSH_DISABLE (1<<0) #define BB_ADDR 0x02140 /* 8 bytes */ #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ #define ECOSKPD 0x021d0 #define ECO_GATING_CX_ONLY (1<<3) #define ECO_FLIP_DONE (1<<0) -/* GEN6 interrupt control */ +#define CACHE_MODE_1 0x7004 /* IVB+ */ +#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) + +/* GEN6 interrupt control + * Note that the per-ring interrupt bits do alias with the global interrupt bits + * in GTIMR. 
*/ #define GEN6_RENDER_HWSTAM 0x2098 #define GEN6_RENDER_IMR 0x20a8 #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) #define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) #define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) #define GEN6_RENDER_SYNC_STATUS (1 << 2) #define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1) #define GEN6_RENDER_USER_INTERRUPT (1 << 0) #define GEN6_BLITTER_HWSTAM 0x22098 #define GEN6_BLITTER_IMR 0x220a8 #define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26) #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) #define GEN6_BLITTER_SYNC_STATUS (1 << 24) #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) #define GEN6_BLITTER_ECOSKPD 0x221d0 #define GEN6_BLITTER_LOCK_SHIFT 16 #define GEN6_BLITTER_FBC_NOTIFY (1<<3) #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) #define GEN6_BSD_HWSTAM 0x12098 #define GEN6_BSD_IMR 0x120a8 #define GEN6_BSD_USER_INTERRUPT (1 << 12) #define GEN6_BSD_RNCID 0x12198 +#define GEN7_FF_THREAD_MODE 0x20a0 +#define GEN7_FF_SCHED_MASK 0x0077070 +#define GEN7_FF_TS_SCHED_HS1 (0x5<<16) +#define GEN7_FF_TS_SCHED_HS0 (0x3<<16) +#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) +#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ +#define GEN7_FF_VS_SCHED_HS1 (0x5<<12) +#define GEN7_FF_VS_SCHED_HS0 (0x3<<12) +#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ +#define GEN7_FF_VS_SCHED_HW (0x0<<12) +#define GEN7_FF_DS_SCHED_HS1 (0x5<<4) +#define GEN7_FF_DS_SCHED_HS0 (0x3<<4) +#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */ +#define GEN7_FF_DS_SCHED_HW (0x0<<4) + /* * Framebuffer compression (915+ only) */ #define FBC_CFB_BASE 0x03200 /* 4k page aligned */ #define FBC_LL_BASE 0x03204 /* 4k page aligned */ #define FBC_CONTROL 0x03208 #define FBC_CTL_EN (1<<31) #define FBC_CTL_PERIODIC (1<<30) #define FBC_CTL_INTERVAL_SHIFT (16) #define FBC_CTL_UNCOMPRESSIBLE (1<<14) #define FBC_CTL_C3_IDLE (1<<13) #define FBC_CTL_STRIDE_SHIFT (5) #define FBC_CTL_FENCENO (1<<0) #define FBC_COMMAND 0x0320c #define FBC_CMD_COMPRESS (1<<0) #define FBC_STATUS 0x03210 #define FBC_STAT_COMPRESSING (1<<31) #define FBC_STAT_COMPRESSED (1<<30) #define FBC_STAT_MODIFIED (1<<29) #define FBC_STAT_CURRENT_LINE (1<<0) #define FBC_CONTROL2 0x03214 #define FBC_CTL_FENCE_DBL (0<<4) #define FBC_CTL_IDLE_IMM (0<<2) #define FBC_CTL_IDLE_FULL (1<<2) #define FBC_CTL_IDLE_LINE (2<<2) #define FBC_CTL_IDLE_DEBUG (3<<2) #define FBC_CTL_CPU_FENCE (1<<1) #define FBC_CTL_PLANEA (0<<0) #define FBC_CTL_PLANEB (1<<0) #define FBC_FENCE_OFF 0x0321b #define FBC_TAG 0x03300 #define FBC_LL_SIZE (1536) /* Framebuffer compression for GM45+ */ #define DPFC_CB_BASE 0x3200 #define DPFC_CONTROL 0x3208 #define DPFC_CTL_EN (1<<31) #define DPFC_CTL_PLANEA (0<<30) #define DPFC_CTL_PLANEB (1<<30) #define DPFC_CTL_FENCE_EN (1<<29) #define DPFC_CTL_PERSISTENT_MODE (1<<25) #define DPFC_SR_EN (1<<10) #define DPFC_CTL_LIMIT_1X (0<<6) #define DPFC_CTL_LIMIT_2X (1<<6) #define DPFC_CTL_LIMIT_4X (2<<6) #define DPFC_RECOMP_CTL 0x320c #define DPFC_RECOMP_STALL_EN (1<<27) #define DPFC_RECOMP_STALL_WM_SHIFT (16) #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) 
#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) #define DPFC_STATUS 0x3210 #define DPFC_INVAL_SEG_SHIFT (16) #define DPFC_INVAL_SEG_MASK (0x07ff0000) #define DPFC_COMP_SEG_SHIFT (0) #define DPFC_COMP_SEG_MASK (0x000003ff) #define DPFC_STATUS2 0x3214 #define DPFC_FENCE_YOFF 0x3218 #define DPFC_CHICKEN 0x3224 #define DPFC_HT_MODIFY (1<<31) /* Framebuffer compression for Ironlake */ #define ILK_DPFC_CB_BASE 0x43200 #define ILK_DPFC_CONTROL 0x43208 /* The bit 28-8 is reserved */ #define DPFC_RESERVED (0x1FFFFF00) #define ILK_DPFC_RECOMP_CTL 0x4320c #define ILK_DPFC_STATUS 0x43210 #define ILK_DPFC_FENCE_YOFF 0x43218 #define ILK_DPFC_CHICKEN 0x43224 #define ILK_FBC_RT_BASE 0x2128 #define ILK_FBC_RT_VALID (1<<0) #define ILK_DISPLAY_CHICKEN1 0x42000 #define ILK_FBCQ_DIS (1<<22) #define ILK_PABSTRETCH_DIS (1<<21) /* * Framebuffer compression for Sandybridge * * The following two registers are of type GTTMMADR */ #define SNB_DPFC_CTL_SA 0x100100 #define SNB_CPU_FENCE_ENABLE (1<<29) #define DPFC_CPU_FENCE_OFFSET 0x100104 /* * GPIO regs */ #define GPIOA 0x5010 #define GPIOB 0x5014 #define GPIOC 0x5018 #define GPIOD 0x501c #define GPIOE 0x5020 #define GPIOF 0x5024 #define GPIOG 0x5028 #define GPIOH 0x502c # define GPIO_CLOCK_DIR_MASK (1 << 0) # define GPIO_CLOCK_DIR_IN (0 << 1) # define GPIO_CLOCK_DIR_OUT (1 << 1) # define GPIO_CLOCK_VAL_MASK (1 << 2) # define GPIO_CLOCK_VAL_OUT (1 << 3) # define GPIO_CLOCK_VAL_IN (1 << 4) # define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) # define GPIO_DATA_DIR_MASK (1 << 8) # define GPIO_DATA_DIR_IN (0 << 9) # define GPIO_DATA_DIR_OUT (1 << 9) # define GPIO_DATA_VAL_MASK (1 << 10) # define GPIO_DATA_VAL_OUT (1 << 11) # define GPIO_DATA_VAL_IN (1 << 12) # define GPIO_DATA_PULLUP_DISABLE (1 << 13) #define GMBUS0 0x5100 /* clock/port select */ #define GMBUS_RATE_100KHZ (0<<8) #define GMBUS_RATE_50KHZ (1<<8) #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ #define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ #define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ #define GMBUS_PORT_DISABLED 0 #define GMBUS_PORT_SSC 1 #define GMBUS_PORT_VGADDC 2 #define GMBUS_PORT_PANEL 3 #define GMBUS_PORT_DPC 4 /* HDMIC */ #define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ - /* 6 reserved */ -#define GMBUS_PORT_DPD 7 /* HDMID */ -#define GMBUS_NUM_PORTS 8 +#define GMBUS_PORT_DPD 6 /* HDMID */ +#define GMBUS_PORT_RESERVED 7 /* 7 reserved */ +#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1) #define GMBUS1 0x5104 /* command/status */ #define GMBUS_SW_CLR_INT (1<<31) #define GMBUS_SW_RDY (1<<30) #define GMBUS_ENT (1<<29) /* enable timeout */ #define GMBUS_CYCLE_NONE (0<<25) #define GMBUS_CYCLE_WAIT (1<<25) #define GMBUS_CYCLE_INDEX (2<<25) #define GMBUS_CYCLE_STOP (4<<25) #define GMBUS_BYTE_COUNT_SHIFT 16 #define GMBUS_SLAVE_INDEX_SHIFT 8 #define GMBUS_SLAVE_ADDR_SHIFT 1 #define GMBUS_SLAVE_READ (1<<0) #define GMBUS_SLAVE_WRITE (0<<0) #define GMBUS2 0x5108 /* status */ #define GMBUS_INUSE (1<<15) #define GMBUS_HW_WAIT_PHASE (1<<14) #define GMBUS_STALL_TIMEOUT (1<<13) #define GMBUS_INT (1<<12) #define GMBUS_HW_RDY (1<<11) #define GMBUS_SATOER (1<<10) #define GMBUS_ACTIVE (1<<9) #define GMBUS3 0x510c /* data buffer bytes 3-0 */ #define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ #define GMBUS_SLAVE_TIMEOUT_EN (1<<4) #define GMBUS_NAK_EN (1<<3) #define GMBUS_IDLE_EN (1<<2) #define GMBUS_HW_WAIT_EN (1<<1) #define GMBUS_HW_RDY_EN (1<<0) #define GMBUS5 0x5120 /* byte index */ #define GMBUS_2BYTE_INDEX_EN (1<<31) /* * Clock control & power management */ #define 
VGA0 0x6000 #define VGA1 0x6004 #define VGA_PD 0x6010 #define VGA0_PD_P2_DIV_4 (1 << 7) #define VGA0_PD_P1_DIV_2 (1 << 5) #define VGA0_PD_P1_SHIFT 0 #define VGA0_PD_P1_MASK (0x1f << 0) #define VGA1_PD_P2_DIV_4 (1 << 15) #define VGA1_PD_P1_DIV_2 (1 << 13) #define VGA1_PD_P1_SHIFT 8 #define VGA1_PD_P1_MASK (0x1f << 8) #define _DPLL_A 0x06014 #define _DPLL_B 0x06018 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) #define DPLL_VCO_ENABLE (1 << 31) #define DPLL_DVO_HIGH_SPEED (1 << 30) +#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) +#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) #define DPLL_VGA_MODE_DIS (1 << 28) #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ #define DPLL_MODE_MASK (3 << 26) #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ #define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ +#define DPLL_INTEGRATED_CLOCK_VLV (1<<13) #define SRX_INDEX 0x3c4 #define SRX_DATA 0x3c5 #define SR01 1 #define SR01_SCREEN_OFF (1<<5) #define PPCR 0x61204 #define PPCR_ON (1<<0) #define DVOB 0x61140 #define DVOB_ON (1<<31) #define DVOC 0x61160 #define DVOC_ON (1<<31) #define LVDS 0x61180 #define LVDS_ON (1<<31) /* Scratch pad debug 0 reg: */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 /* * The i830 generation, in LVDS mode, defines P1 as the bit number set within * this field (only one bit may be set). */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 #define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15 /* i830, required in DVO non-gang */ #define PLL_P2_DIVIDE_BY_4 (1 << 23) #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ #define PLL_REF_INPUT_DREFCLK (0 << 13) #define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ #define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) #define PLL_REF_INPUT_MASK (3 << 13) #define PLL_LOAD_PULSE_PHASE_SHIFT 9 /* Ironlake */ # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) # define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) # define DPLL_FPA1_P1_POST_DIV_SHIFT 0 # define DPLL_FPA1_P1_POST_DIV_MASK 0xff /* * Parallel to Serial Load Pulse phase selection. * Selects the phase for the 10X DPLL clock for the PCIe * digital display port. The range is 4 to 13; 10 or more * is just a flip delay. The default is 6 */ #define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) #define DISPLAY_RATE_SELECT_FPA1 (1 << 8) /* * SDVO multiplier for 945G/GM. Not used on 965. */ #define SDVO_MULTIPLIER_MASK 0x000000ff #define SDVO_MULTIPLIER_SHIFT_HIRES 4 #define SDVO_MULTIPLIER_SHIFT_VGA 0 #define _DPLL_A_MD 0x0601c /* 965+ only */ /* * UDI pixel divider, controlling how many pixels are stuffed into a packet. * * Value is pixels minus 1. Must be set to 1 pixel for SDVO. */ #define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 #define DPLL_MD_UDI_DIVIDER_SHIFT 24 /* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ #define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 #define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 /* * SDVO/UDI pixel multiplier. 
* * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus * clock rate is 10 times the DPLL clock. At low resolution/refresh rate * modes, the bus rate would be below the limits, so SDVO allows for stuffing * dummy bytes in the datastream at an increased clock rate, with both sides of * the link knowing how many bytes are fill. * * So, for a mode with a dotclock of 65Mhz, we would want to double the clock * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be * set to 130Mhz, and the SDVO multiplier set to 2x in this register and * through an SDVO command. * * This register field has values of multiplication factor minus 1, with * a maximum multiplier of 5 for SDVO. */ #define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 #define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 /* * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. * This best be set to the default value (3) or the CRT won't work. No, * I don't entirely understand what this does... */ #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 #define _DPLL_B_MD 0x06020 /* 965+ only */ #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) + #define _FPA0 0x06040 #define _FPA1 0x06044 #define _FPB0 0x06048 #define _FPB1 0x0604c #define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) #define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) #define FP_N_DIV_MASK 0x003f0000 #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 #define FP_N_DIV_SHIFT 16 #define FP_M1_DIV_MASK 0x00003f00 #define FP_M1_DIV_SHIFT 8 #define FP_M2_DIV_MASK 0x0000003f #define FP_M2_PINEVIEW_DIV_MASK 0x000000ff #define FP_M2_DIV_SHIFT 0 #define DPLL_TEST 0x606c #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) #define DPLLB_TEST_SDVO_DIV_2 (1 << 22) #define DPLLB_TEST_SDVO_DIV_4 (2 << 22) #define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) #define DPLLB_TEST_N_BYPASS (1 << 19) #define DPLLB_TEST_M_BYPASS (1 << 18) #define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) #define DPLLA_TEST_N_BYPASS (1 << 3) #define DPLLA_TEST_M_BYPASS (1 << 2) #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) #define D_STATE 0x6104 #define DSTATE_GFX_RESET_I830 (1<<6) #define DSTATE_PLL_D3_OFF (1<<3) #define DSTATE_GFX_CLOCK_GATING (1<<1) #define DSTATE_DOT_CLOCK_GATING (1<<0) #define DSPCLK_GATE_D 0x6200 # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ # define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */ # define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */ # define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */ # define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */ # define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */ # define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */ # define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */ # define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */ # define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */ # define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */ # define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */ # define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */ # define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */ # define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */ # define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */ # define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */ # define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11) # define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10) # define DCUNIT_CLOCK_GATE_DISABLE (1 << 9) # define DPUNIT_CLOCK_GATE_DISABLE (1 
<< 8) # define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */ # define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */ # define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */ # define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5) # define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4) /** * This bit must be set on the 830 to prevent hangs when turning off the * overlay scaler. */ # define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3) # define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2) # define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1) # define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */ # define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */ #define RENCLK_GATE_D1 0x6204 # define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */ # define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */ # define PC_FE_CLOCK_GATE_DISABLE (1 << 11) # define PC_BE_CLOCK_GATE_DISABLE (1 << 10) # define WINDOWER_CLOCK_GATE_DISABLE (1 << 9) # define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8) # define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7) # define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6) # define MAG_CLOCK_GATE_DISABLE (1 << 5) /** This bit must be unset on 855,865 */ # define MECI_CLOCK_GATE_DISABLE (1 << 4) # define DCMP_CLOCK_GATE_DISABLE (1 << 3) # define MEC_CLOCK_GATE_DISABLE (1 << 2) # define MECO_CLOCK_GATE_DISABLE (1 << 1) /** This bit must be set on 855,865. */ # define SV_CLOCK_GATE_DISABLE (1 << 0) # define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16) # define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15) # define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14) # define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13) # define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12) # define I915_WM_CLOCK_GATE_DISABLE (1 << 11) # define I915_IZ_CLOCK_GATE_DISABLE (1 << 10) # define I915_PI_CLOCK_GATE_DISABLE (1 << 9) # define I915_DI_CLOCK_GATE_DISABLE (1 << 8) # define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7) # define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6) # define I915_SC_CLOCK_GATE_DISABLE (1 << 5) # define I915_FL_CLOCK_GATE_DISABLE (1 << 4) # define I915_DM_CLOCK_GATE_DISABLE (1 << 3) # define I915_PS_CLOCK_GATE_DISABLE (1 << 2) # define I915_CC_CLOCK_GATE_DISABLE (1 << 1) # define I915_BY_CLOCK_GATE_DISABLE (1 << 0) # define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30) /** This bit must always be set on 965G/965GM */ # define I965_RCC_CLOCK_GATE_DISABLE (1 << 29) # define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28) # define I965_DAP_CLOCK_GATE_DISABLE (1 << 27) # define I965_ROC_CLOCK_GATE_DISABLE (1 << 26) # define I965_GW_CLOCK_GATE_DISABLE (1 << 25) # define I965_TD_CLOCK_GATE_DISABLE (1 << 24) /** This bit must always be set on 965G */ # define I965_ISC_CLOCK_GATE_DISABLE (1 << 23) # define I965_IC_CLOCK_GATE_DISABLE (1 << 22) # define I965_EU_CLOCK_GATE_DISABLE (1 << 21) # define I965_IF_CLOCK_GATE_DISABLE (1 << 20) # define I965_TC_CLOCK_GATE_DISABLE (1 << 19) # define I965_SO_CLOCK_GATE_DISABLE (1 << 17) # define I965_FBC_CLOCK_GATE_DISABLE (1 << 16) # define I965_MARI_CLOCK_GATE_DISABLE (1 << 15) # define I965_MASF_CLOCK_GATE_DISABLE (1 << 14) # define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13) # define I965_EM_CLOCK_GATE_DISABLE (1 << 12) # define I965_UC_CLOCK_GATE_DISABLE (1 << 11) # define I965_SI_CLOCK_GATE_DISABLE (1 << 6) # define I965_MT_CLOCK_GATE_DISABLE (1 << 5) # define I965_PL_CLOCK_GATE_DISABLE (1 << 4) # define I965_DG_CLOCK_GATE_DISABLE (1 << 3) # define I965_QC_CLOCK_GATE_DISABLE (1 << 2) # define I965_FT_CLOCK_GATE_DISABLE (1 << 1) # define I965_DM_CLOCK_GATE_DISABLE (1 << 0) #define RENCLK_GATE_D2 0x6208 #define 
VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) #define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) #define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) #define RAMCLK_GATE_D 0x6210 /* CRL only */ #define DEUC 0x6214 /* CRL only */ +#define FW_BLC_SELF_VLV 0x6500 +#define FW_CSPWRDWNEN (1<<15) + /* * Palette regs */ #define _PALETTE_A 0x0a000 #define _PALETTE_B 0x0a800 #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) /* MCH MMIO space */ /* * MCHBAR mirror. * * This mirrors the MCHBAR MMIO space whose location is determined by * device 0 function 0's pci config register 0x44 or 0x48 and matches it in * every way. It is not accessible from the CP register read instructions. * */ #define MCHBAR_MIRROR_BASE 0x10000 #define MCHBAR_MIRROR_BASE_SNB 0x140000 /** 915-945 and GM965 MCH register controlling DRAM channel access */ #define DCC 0x10200 #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) #define DCC_ADDRESSING_MODE_MASK (3 << 0) #define DCC_CHANNEL_XOR_DISABLE (1 << 10) #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) /** Pineview MCH register contains DDR3 setting */ #define CSHRDDR3CTL 0x101a8 #define CSHRDDR3CTL_DDR3 (1 << 2) /** 965 MCH register controlling DRAM channel configuration */ #define C0DRB3 0x10206 #define C1DRB3 0x10606 /** snb MCH registers for reading the DRAM channel configuration */ #define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) #define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) #define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) #define MAD_DIMM_ECC_MASK (0x3 << 24) #define MAD_DIMM_ECC_OFF (0x0 << 24) #define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) #define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24) #define MAD_DIMM_ECC_ON (0x3 << 24) #define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22) #define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21) #define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */ #define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */ #define MAD_DIMM_B_DUAL_RANK (0x1 << 18) #define MAD_DIMM_A_DUAL_RANK (0x1 << 17) #define MAD_DIMM_A_SELECT (0x1 << 16) /* DIMM sizes are in multiples of 256mb. 
*/ #define MAD_DIMM_B_SIZE_SHIFT 8 #define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT) #define MAD_DIMM_A_SIZE_SHIFT 0 #define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) /* Clocking configuration register */ #define CLKCFG 0x10c00 #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ #define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ /* Note, below two are guess */ #define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ #define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ #define CLKCFG_FSB_MASK (7 << 0) #define CLKCFG_MEM_533 (1 << 4) #define CLKCFG_MEM_667 (2 << 4) #define CLKCFG_MEM_800 (3 << 4) #define CLKCFG_MEM_MASK (7 << 4) #define TSC1 0x11001 #define TSE (1<<0) #define I915_TR1 0x11006 #define TSFS 0x11020 #define TSFS_SLOPE_MASK 0x0000ff00 #define TSFS_SLOPE_SHIFT 8 #define TSFS_INTR_MASK 0x000000ff #define CRSTANDVID 0x11100 #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ #define PXVFREQ_PX_MASK 0x7f000000 #define PXVFREQ_PX_SHIFT 24 #define VIDFREQ_BASE 0x11110 #define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ #define VIDFREQ2 0x11114 #define VIDFREQ3 0x11118 #define VIDFREQ4 0x1111c #define VIDFREQ_P0_MASK 0x1f000000 #define VIDFREQ_P0_SHIFT 24 #define VIDFREQ_P0_CSCLK_MASK 0x00f00000 #define VIDFREQ_P0_CSCLK_SHIFT 20 #define VIDFREQ_P0_CRCLK_MASK 0x000f0000 #define VIDFREQ_P0_CRCLK_SHIFT 16 #define VIDFREQ_P1_MASK 0x00001f00 #define VIDFREQ_P1_SHIFT 8 #define VIDFREQ_P1_CSCLK_MASK 0x000000f0 #define VIDFREQ_P1_CSCLK_SHIFT 4 #define VIDFREQ_P1_CRCLK_MASK 0x0000000f #define INTTOEXT_BASE_ILK 0x11300 #define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ #define INTTOEXT_MAP3_SHIFT 24 #define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) #define INTTOEXT_MAP2_SHIFT 16 #define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) #define INTTOEXT_MAP1_SHIFT 8 #define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) #define INTTOEXT_MAP0_SHIFT 0 #define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) #define MEMSWCTL 0x11170 /* Ironlake only */ #define MEMCTL_CMD_MASK 0xe000 #define MEMCTL_CMD_SHIFT 13 #define MEMCTL_CMD_RCLK_OFF 0 #define MEMCTL_CMD_RCLK_ON 1 #define MEMCTL_CMD_CHFREQ 2 #define MEMCTL_CMD_CHVID 3 #define MEMCTL_CMD_VMMOFF 4 #define MEMCTL_CMD_VMMON 5 #define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears when command complete */ #define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ #define MEMCTL_FREQ_SHIFT 8 #define MEMCTL_SFCAVM (1<<7) #define MEMCTL_TGT_VID_MASK 0x007f #define MEMIHYST 0x1117c #define MEMINTREN 0x11180 /* 16 bits */ #define MEMINT_RSEXIT_EN (1<<8) #define MEMINT_CX_SUPR_EN (1<<7) #define MEMINT_CONT_BUSY_EN (1<<6) #define MEMINT_AVG_BUSY_EN (1<<5) #define MEMINT_EVAL_CHG_EN (1<<4) #define MEMINT_MON_IDLE_EN (1<<3) #define MEMINT_UP_EVAL_EN (1<<2) #define MEMINT_DOWN_EVAL_EN (1<<1) #define MEMINT_SW_CMD_EN (1<<0) #define MEMINTRSTR 0x11182 /* 16 bits */ #define MEM_RSEXIT_MASK 0xc000 #define MEM_RSEXIT_SHIFT 14 #define MEM_CONT_BUSY_MASK 0x3000 #define MEM_CONT_BUSY_SHIFT 12 #define MEM_AVG_BUSY_MASK 0x0c00 #define MEM_AVG_BUSY_SHIFT 10 #define MEM_EVAL_CHG_MASK 0x0300 #define MEM_EVAL_BUSY_SHIFT 8 #define MEM_MON_IDLE_MASK 0x00c0 #define MEM_MON_IDLE_SHIFT 6 #define MEM_UP_EVAL_MASK 0x0030 #define MEM_UP_EVAL_SHIFT 4 #define MEM_DOWN_EVAL_MASK 0x000c #define MEM_DOWN_EVAL_SHIFT 2 
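/*
 * Illustrative sketch, not part of the header or of this commit: issuing a
 * frequency-change command through MEMSWCTL on Ironlake.  Per the comments
 * above, writing MEMCTL_CMD_STS as 1 triggers the command and the bit reads
 * back 0 once the hardware has completed it.  struct drm_i915_private, the
 * I915_READ()/I915_WRITE() accessors and DELAY() are assumed from the
 * surrounding driver; the helper name is hypothetical.
 */
static bool
ironlake_memswctl_set_freq(struct drm_i915_private *dev_priv, u32 fstart)
{
	u32 swctl;
	int retries = 100;

	swctl = I915_READ(MEMSWCTL);
	swctl &= ~(MEMCTL_CMD_MASK | MEMCTL_FREQ_MASK);
	swctl |= (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
	    ((fstart << MEMCTL_FREQ_SHIFT) & MEMCTL_FREQ_MASK) |
	    MEMCTL_SFCAVM | MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, swctl);

	/* The status bit clears once the command has completed. */
	while ((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) != 0 && --retries != 0)
		DELAY(10);

	return (retries != 0);
}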
#define MEM_SW_CMD_MASK 0x0003 #define MEM_INT_STEER_GFX 0 #define MEM_INT_STEER_CMR 1 #define MEM_INT_STEER_SMI 2 #define MEM_INT_STEER_SCI 3 #define MEMINTRSTS 0x11184 #define MEMINT_RSEXIT (1<<7) #define MEMINT_CONT_BUSY (1<<6) #define MEMINT_AVG_BUSY (1<<5) #define MEMINT_EVAL_CHG (1<<4) #define MEMINT_MON_IDLE (1<<3) #define MEMINT_UP_EVAL (1<<2) #define MEMINT_DOWN_EVAL (1<<1) #define MEMINT_SW_CMD (1<<0) #define MEMMODECTL 0x11190 #define MEMMODE_BOOST_EN (1<<31) #define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ #define MEMMODE_BOOST_FREQ_SHIFT 24 #define MEMMODE_IDLE_MODE_MASK 0x00030000 #define MEMMODE_IDLE_MODE_SHIFT 16 #define MEMMODE_IDLE_MODE_EVAL 0 #define MEMMODE_IDLE_MODE_CONT 1 #define MEMMODE_HWIDLE_EN (1<<15) #define MEMMODE_SWMODE_EN (1<<14) #define MEMMODE_RCLK_GATE (1<<13) #define MEMMODE_HW_UPDATE (1<<12) #define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ #define MEMMODE_FSTART_SHIFT 8 #define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ #define MEMMODE_FMAX_SHIFT 4 #define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ #define RCBMAXAVG 0x1119c #define MEMSWCTL2 0x1119e /* Cantiga only */ #define SWMEMCMD_RENDER_OFF (0 << 13) #define SWMEMCMD_RENDER_ON (1 << 13) #define SWMEMCMD_SWFREQ (2 << 13) #define SWMEMCMD_TARVID (3 << 13) #define SWMEMCMD_VRM_OFF (4 << 13) #define SWMEMCMD_VRM_ON (5 << 13) #define CMDSTS (1<<12) #define SFCAVM (1<<11) #define SWFREQ_MASK 0x0380 /* P0-7 */ #define SWFREQ_SHIFT 7 #define TARVID_MASK 0x001f #define MEMSTAT_CTG 0x111a0 #define RCBMINAVG 0x111a0 #define RCUPEI 0x111b0 #define RCDNEI 0x111b4 #define RSTDBYCTL 0x111b8 #define RS1EN (1<<31) #define RS2EN (1<<30) #define RS3EN (1<<29) #define D3RS3EN (1<<28) /* Display D3 imlies RS3 */ #define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ #define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ #define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ #define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ #define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ #define RSX_STATUS_MASK (7<<20) #define RSX_STATUS_ON (0<<20) #define RSX_STATUS_RC1 (1<<20) #define RSX_STATUS_RC1E (2<<20) #define RSX_STATUS_RS1 (3<<20) #define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ #define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ #define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ #define RSX_STATUS_RSVD2 (7<<20) #define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ #define JRSC (1<<17) /* rsx coupled to cpu c-state */ #define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ #define RS1CONTSAV_MASK (3<<14) #define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ #define RS1CONTSAV_RSVD (1<<14) #define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ #define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ #define NORMSLEXLAT_MASK (3<<12) #define SLOW_RS123 (0<<12) #define SLOW_RS23 (1<<12) #define SLOW_RS3 (2<<12) #define NORMAL_RS123 (3<<12) #define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ #define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ #define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ #define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ #define RS_CSTATE_MASK (3<<4) #define RS_CSTATE_C367_RS1 (0<<4) #define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) #define RS_CSTATE_RSVD (2<<4) #define RS_CSTATE_C367_RS2 (3<<4) 
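/*
 * Illustrative sketch, not part of the header or of this commit: decoding the
 * current render-standby state from RSTDBYCTL using the RSX_STATUS_* values
 * defined above.  struct drm_i915_private and I915_READ() are assumed from
 * the surrounding driver; the helper name is hypothetical.
 */
static const char *
rstdbyctl_state_name(struct drm_i915_private *dev_priv)
{
	switch (I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		return ("on");
	case RSX_STATUS_RC1:
		return ("RC1");
	case RSX_STATUS_RC1E:
		return ("RC1E");
	case RSX_STATUS_RS1:
		return ("RS1");
	case RSX_STATUS_RS2:
		return ("RS2 (RC6)");
	case RSX_STATUS_RS3:
		return ("RS3");
	default:
		return ("reserved");
	}
}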
#define REDSAVES (1<<3) /* no context save if was idle during rs0 */ #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ #define VIDCTL 0x111c0 #define VIDSTS 0x111c8 #define VIDSTART 0x111cc /* 8 bits */ #define MEMSTAT_ILK 0x111f8 #define MEMSTAT_VID_MASK 0x7f00 #define MEMSTAT_VID_SHIFT 8 #define MEMSTAT_PSTATE_MASK 0x00f8 #define MEMSTAT_PSTATE_SHIFT 3 #define MEMSTAT_MON_ACTV (1<<2) #define MEMSTAT_SRC_CTL_MASK 0x0003 #define MEMSTAT_SRC_CTL_CORE 0 #define MEMSTAT_SRC_CTL_TRB 1 #define MEMSTAT_SRC_CTL_THM 2 #define MEMSTAT_SRC_CTL_STDBY 3 #define RCPREVBSYTUPAVG 0x113b8 #define RCPREVBSYTDNAVG 0x113bc #define PMMISC 0x11214 #define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ #define SDEW 0x1124c #define CSIEW0 0x11250 #define CSIEW1 0x11254 #define CSIEW2 0x11258 #define PEW 0x1125c #define DEW 0x11270 #define MCHAFE 0x112c0 #define CSIEC 0x112e0 #define DMIEC 0x112e4 #define DDREC 0x112e8 #define PEG0EC 0x112ec #define PEG1EC 0x112f0 #define GFXEC 0x112f4 #define RPPREVBSYTUPAVG 0x113b8 #define RPPREVBSYTDNAVG 0x113bc #define ECR 0x11600 #define ECR_GPFE (1<<31) #define ECR_IMONE (1<<30) #define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ #define OGW0 0x11608 #define OGW1 0x1160c #define EG0 0x11610 #define EG1 0x11614 #define EG2 0x11618 #define EG3 0x1161c #define EG4 0x11620 #define EG5 0x11624 #define EG6 0x11628 #define EG7 0x1162c #define PXW 0x11664 #define PXWL 0x11680 #define LCFUSE02 0x116c0 #define LCFUSE_HIV_MASK 0x000000ff #define CSIPLL0 0x12c10 #define DDRMPLL1 0X12c20 #define PEG_BAND_GAP_DATA 0x14d68 #define GEN6_GT_PERF_STATUS 0x145948 #define GEN6_RP_STATE_LIMITS 0x145994 #define GEN6_RP_STATE_CAP 0x145998 /* * Logical Context regs */ #define CCID 0x2180 #define CCID_EN (1<<0) #define CXT_SIZE 0x21a0 #define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) #define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) #define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) #define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) #define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) #define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \ GEN6_CXT_RING_SIZE(cxt_reg) + \ GEN6_CXT_RENDER_SIZE(cxt_reg) + \ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ GEN6_CXT_PIPELINE_SIZE(cxt_reg)) #define GEN7_CXT_SIZE 0x21a8 #define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f) #define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7) #define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) #define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) #define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \ GEN7_CXT_RING_SIZE(ctx_reg) + \ GEN7_CXT_RENDER_SIZE(ctx_reg) + \ GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ GEN7_CXT_GT1_SIZE(ctx_reg) + \ GEN7_CXT_VFSTATE_SIZE(ctx_reg)) /* * Overlay regs */ #define OVADD 0x30000 #define DOVSTA 0x30008 #define OC_BUF (0x3<<20) #define OGAMC5 0x30010 #define OGAMC4 0x30014 #define OGAMC3 0x30018 #define OGAMC2 0x3001c #define OGAMC1 0x30020 #define OGAMC0 0x30024 /* * Display engine regs */ /* Pipe A timing regs */ #define _HTOTAL_A 0x60000 #define _HBLANK_A 0x60004 #define _HSYNC_A 0x60008 #define _VTOTAL_A 0x6000c #define _VBLANK_A 0x60010 #define _VSYNC_A 0x60014 #define _PIPEASRC 0x6001c #define _BCLRPAT_A 0x60020 #define _VSYNCSHIFT_A 0x60028 /* Pipe B timing regs */ #define _HTOTAL_B 0x61000 #define _HBLANK_B 0x61004 #define _HSYNC_B 
0x61008 #define _VTOTAL_B 0x6100c #define _VBLANK_B 0x61010 #define _VSYNC_B 0x61014 #define _PIPEBSRC 0x6101c #define _BCLRPAT_B 0x61020 #define _VSYNCSHIFT_B 0x61028 #define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) #define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) #define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) #define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) #define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) #define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) #define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) /* VGA port control */ #define ADPA 0x61100 #define ADPA_DAC_ENABLE (1<<31) #define ADPA_DAC_DISABLE 0 #define ADPA_PIPE_SELECT_MASK (1<<30) #define ADPA_PIPE_A_SELECT 0 #define ADPA_PIPE_B_SELECT (1<<30) #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) #define ADPA_USE_VGA_HVPOLARITY (1<<15) #define ADPA_SETS_HVPOLARITY 0 #define ADPA_VSYNC_CNTL_DISABLE (1<<11) #define ADPA_VSYNC_CNTL_ENABLE 0 #define ADPA_HSYNC_CNTL_DISABLE (1<<10) #define ADPA_HSYNC_CNTL_ENABLE 0 #define ADPA_VSYNC_ACTIVE_HIGH (1<<4) #define ADPA_VSYNC_ACTIVE_LOW 0 #define ADPA_HSYNC_ACTIVE_HIGH (1<<3) #define ADPA_HSYNC_ACTIVE_LOW 0 #define ADPA_DPMS_MASK (~(3<<10)) #define ADPA_DPMS_ON (0<<10) #define ADPA_DPMS_SUSPEND (1<<10) #define ADPA_DPMS_STANDBY (2<<10) #define ADPA_DPMS_OFF (3<<10) /* Hotplug control (945+ only) */ #define PORT_HOTPLUG_EN 0x61110 #define HDMIB_HOTPLUG_INT_EN (1 << 29) #define DPB_HOTPLUG_INT_EN (1 << 29) #define HDMIC_HOTPLUG_INT_EN (1 << 28) #define DPC_HOTPLUG_INT_EN (1 << 28) #define HDMID_HOTPLUG_INT_EN (1 << 27) #define DPD_HOTPLUG_INT_EN (1 << 27) #define SDVOB_HOTPLUG_INT_EN (1 << 26) #define SDVOC_HOTPLUG_INT_EN (1 << 25) #define TV_HOTPLUG_INT_EN (1 << 18) #define CRT_HOTPLUG_INT_EN (1 << 9) #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) /* must use period 64 on GM45 according to docs */ #define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) #define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) #define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) #define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) #define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) #define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) #define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) #define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) #define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define PORT_HOTPLUG_STAT 0x61114 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) #define DPB_HOTPLUG_INT_STATUS (1 << 29) #define HDMIC_HOTPLUG_INT_STATUS (1 << 28) #define DPC_HOTPLUG_INT_STATUS (1 << 28) #define HDMID_HOTPLUG_INT_STATUS (1 << 27) #define DPD_HOTPLUG_INT_STATUS (1 << 27) #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) #define SDVOC_HOTPLUG_INT_STATUS (1 << 7) #define SDVOB_HOTPLUG_INT_STATUS (1 << 6) /* SDVO port control */ #define SDVOB 0x61140 #define SDVOC 0x61160 #define SDVO_ENABLE (1 << 31) #define SDVO_PIPE_B_SELECT (1 << 30) #define SDVO_STALL_SELECT (1 << 29) #define SDVO_INTERRUPT_ENABLE (1 << 26) /** * 915G/GM SDVO pixel multiplier. * * Programmed value is multiplier - 1, up to 5x. 
* * \sa DPLL_MD_UDI_MULTIPLIER_MASK */ #define SDVO_PORT_MULTIPLY_MASK (7 << 23) #define SDVO_PORT_MULTIPLY_SHIFT 23 #define SDVO_PHASE_SELECT_MASK (15 << 19) #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) #define SDVOC_GANG_MODE (1 << 16) #define SDVO_ENCODING_SDVO (0x0 << 10) #define SDVO_ENCODING_HDMI (0x2 << 10) /** Requird for HDMI operation */ #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) #define SDVO_COLOR_RANGE_16_235 (1 << 8) #define SDVO_BORDER_ENABLE (1 << 7) #define SDVO_AUDIO_ENABLE (1 << 6) /** New with 965, default is to be set */ #define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) /** New with 965, default is to be set */ #define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) #define SDVOB_PCIE_CONCURRENCY (1 << 3) #define SDVO_DETECTED (1 << 2) /* Bits to be preserved when writing */ #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) #define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) /* DVO port control */ #define DVOA 0x61120 #define DVOB 0x61140 #define DVOC 0x61160 #define DVO_ENABLE (1 << 31) #define DVO_PIPE_B_SELECT (1 << 30) #define DVO_PIPE_STALL_UNUSED (0 << 28) #define DVO_PIPE_STALL (1 << 28) #define DVO_PIPE_STALL_TV (2 << 28) #define DVO_PIPE_STALL_MASK (3 << 28) #define DVO_USE_VGA_SYNC (1 << 15) #define DVO_DATA_ORDER_I740 (0 << 14) #define DVO_DATA_ORDER_FP (1 << 14) #define DVO_VSYNC_DISABLE (1 << 11) #define DVO_HSYNC_DISABLE (1 << 10) #define DVO_VSYNC_TRISTATE (1 << 9) #define DVO_HSYNC_TRISTATE (1 << 8) #define DVO_BORDER_ENABLE (1 << 7) #define DVO_DATA_ORDER_GBRG (1 << 6) #define DVO_DATA_ORDER_RGGB (0 << 6) #define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) #define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) #define DVO_VSYNC_ACTIVE_HIGH (1 << 4) #define DVO_HSYNC_ACTIVE_HIGH (1 << 3) #define DVO_BLANK_ACTIVE_HIGH (1 << 2) #define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ #define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ #define DVO_PRESERVE_MASK (0x7<<24) #define DVOA_SRCDIM 0x61124 #define DVOB_SRCDIM 0x61144 #define DVOC_SRCDIM 0x61164 #define DVO_SRCDIM_HORIZONTAL_SHIFT 12 #define DVO_SRCDIM_VERTICAL_SHIFT 0 /* LVDS port control */ #define LVDS 0x61180 /* * Enables the LVDS port. This bit must be set before DPLLs are enabled, as * the DPLL semantics change when the LVDS is assigned to that pipe. */ #define LVDS_PORT_EN (1 << 31) /* Selects pipe B for LVDS data. Must be set on pre-965. */ #define LVDS_PIPEB_SELECT (1 << 30) #define LVDS_PIPE_MASK (1 << 30) #define LVDS_PIPE(pipe) ((pipe) << 30) /* LVDS dithering flag on 965/g4x platform */ #define LVDS_ENABLE_DITHER (1 << 25) /* LVDS sync polarity flags. Set to invert (i.e. negative) */ #define LVDS_VSYNC_POLARITY (1 << 21) #define LVDS_HSYNC_POLARITY (1 << 20) /* Enable border for unscaled (or aspect-scaled) display */ #define LVDS_BORDER_ENABLE (1 << 15) /* * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per * pixel. */ #define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) #define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) #define LVDS_A0A2_CLKA_POWER_UP (3 << 8) /* * Controls the A3 data pair, which contains the additional LSBs for 24 bit * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be * on. */ #define LVDS_A3_POWER_MASK (3 << 6) #define LVDS_A3_POWER_DOWN (0 << 6) #define LVDS_A3_POWER_UP (3 << 6) /* * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP * is set. 
*/ #define LVDS_CLKB_POWER_MASK (3 << 4) #define LVDS_CLKB_POWER_DOWN (0 << 4) #define LVDS_CLKB_POWER_UP (3 << 4) /* * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 * setting for whether we are in dual-channel mode. The B3 pair will * additionally only be powered up when LVDS_A3_POWER_UP is set. */ #define LVDS_B0B3_POWER_MASK (3 << 2) #define LVDS_B0B3_POWER_DOWN (0 << 2) #define LVDS_B0B3_POWER_UP (3 << 2) /* Video Data Island Packet control */ #define VIDEO_DIP_DATA 0x61178 #define VIDEO_DIP_CTL 0x61170 +/* Pre HSW: */ #define VIDEO_DIP_ENABLE (1 << 31) #define VIDEO_DIP_PORT_B (1 << 29) #define VIDEO_DIP_PORT_C (2 << 29) +#define VIDEO_DIP_PORT_D (3 << 29) +#define VIDEO_DIP_PORT_MASK (3 << 29) #define VIDEO_DIP_ENABLE_AVI (1 << 21) #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) #define VIDEO_DIP_ENABLE_SPD (8 << 21) #define VIDEO_DIP_SELECT_MASK (3 << 19) #define VIDEO_DIP_SELECT_AVI (0 << 19) #define VIDEO_DIP_SELECT_VENDOR (1 << 19) #define VIDEO_DIP_SELECT_SPD (3 << 19) #define VIDEO_DIP_FREQ_ONCE (0 << 16) #define VIDEO_DIP_FREQ_VSYNC (1 << 16) #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) +#define VIDEO_DIP_FREQ_MASK (3 << 16) +/* HSW and later: */ +#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) +#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) /* Panel power sequencing */ #define PP_STATUS 0x61200 #define PP_ON (1 << 31) /* * Indicates that all dependencies of the panel are on: * * - PLL enabled * - pipe enabled * - LVDS/DVOB/DVOC on */ #define PP_READY (1 << 30) #define PP_SEQUENCE_NONE (0 << 28) #define PP_SEQUENCE_POWER_UP (1 << 28) #define PP_SEQUENCE_POWER_DOWN (2 << 28) #define PP_SEQUENCE_MASK (3 << 28) #define PP_SEQUENCE_SHIFT 28 #define PP_CYCLE_DELAY_ACTIVE (1 << 27) #define PP_SEQUENCE_STATE_MASK 0x0000000f #define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) #define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) #define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) #define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) #define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) #define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) #define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) #define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) #define PP_SEQUENCE_STATE_RESET (0xf << 0) #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) #define PP_ON_DELAYS 0x61208 #define PP_OFF_DELAYS 0x6120c #define PP_DIVISOR 0x61210 /* Panel fitting */ #define PFIT_CONTROL 0x61230 #define PFIT_ENABLE (1 << 31) #define PFIT_PIPE_MASK (3 << 29) #define PFIT_PIPE_SHIFT 29 #define VERT_INTERP_DISABLE (0 << 10) #define VERT_INTERP_BILINEAR (1 << 10) #define VERT_INTERP_MASK (3 << 10) #define VERT_AUTO_SCALE (1 << 9) #define HORIZ_INTERP_DISABLE (0 << 6) #define HORIZ_INTERP_BILINEAR (1 << 6) #define HORIZ_INTERP_MASK (3 << 6) #define HORIZ_AUTO_SCALE (1 << 5) #define PANEL_8TO6_DITHER_ENABLE (1 << 3) #define PFIT_FILTER_FUZZY (0 << 24) #define PFIT_SCALING_AUTO (0 << 26) #define PFIT_SCALING_PROGRAMMED (1 << 26) #define PFIT_SCALING_PILLAR (2 << 26) #define PFIT_SCALING_LETTER (3 << 26) #define PFIT_PGM_RATIOS 0x61234 #define PFIT_VERT_SCALE_MASK 0xfff00000 #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 /* Pre-965 */ #define PFIT_VERT_SCALE_SHIFT 20 #define PFIT_VERT_SCALE_MASK 0xfff00000 #define PFIT_HORIZ_SCALE_SHIFT 4 #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 /* 965+ */ #define PFIT_VERT_SCALE_SHIFT_965 16 #define PFIT_VERT_SCALE_MASK_965 0x1fff0000 #define PFIT_HORIZ_SCALE_SHIFT_965 0 #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff #define PFIT_AUTO_RATIOS 0x61238 /* Backlight control */ #define BLC_PWM_CTL 0x61254 #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) 
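/*
 * Illustrative sketch, not part of the header or of this commit: requesting
 * panel power-up through PP_CONTROL and waiting for the power sequencer
 * state reported in PP_STATUS (defined above) to settle.  struct
 * drm_i915_private, the I915_READ()/I915_WRITE() accessors and DELAY() are
 * assumed from the surrounding driver; the helper name and the timeout are
 * hypothetical.
 */
static bool
intel_panel_power_on_wait(struct drm_i915_private *dev_priv)
{
	u32 status;
	int retries = 300;	/* panel power cycles can take hundreds of ms */

	I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | POWER_TARGET_ON);

	while (--retries != 0) {
		status = I915_READ(PP_STATUS);
		/* Done once the panel is on and the sequencer is idle. */
		if ((status & PP_ON) != 0 &&
		    (status & PP_SEQUENCE_MASK) == PP_SEQUENCE_NONE)
			return (true);
		DELAY(1000);
	}
	return (false);
}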
#define BLC_PWM_CTL2 0x61250 /* 965+ only */ #define BLM_COMBINATION_MODE (1 << 30) /* * This is the most significant 15 bits of the number of backlight cycles in a * complete cycle of the modulated backlight control. * * The actual value is this field multiplied by two. */ #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) #define BLM_LEGACY_MODE (1 << 16) /* * This is the number of cycles out of the backlight modulation cycle for which * the backlight is on. * * This field must be no greater than the number of cycles in the complete * backlight modulation cycle. */ #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) #define BLC_HIST_CTL 0x61260 /* TV port control */ #define TV_CTL 0x68000 /** Enables the TV encoder */ # define TV_ENC_ENABLE (1 << 31) /** Sources the TV encoder input from pipe B instead of A. */ # define TV_ENC_PIPEB_SELECT (1 << 30) /** Outputs composite video (DAC A only) */ # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) /** Outputs SVideo video (DAC B/C) */ # define TV_ENC_OUTPUT_SVIDEO (1 << 28) /** Outputs Component video (DAC A/B/C) */ # define TV_ENC_OUTPUT_COMPONENT (2 << 28) /** Outputs Composite and SVideo (DAC A/B/C) */ # define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) # define TV_TRILEVEL_SYNC (1 << 21) /** Enables slow sync generation (945GM only) */ # define TV_SLOW_SYNC (1 << 20) /** Selects 4x oversampling for 480i and 576p */ # define TV_OVERSAMPLE_4X (0 << 18) /** Selects 2x oversampling for 720p and 1080i */ # define TV_OVERSAMPLE_2X (1 << 18) /** Selects no oversampling for 1080p */ # define TV_OVERSAMPLE_NONE (2 << 18) /** Selects 8x oversampling */ # define TV_OVERSAMPLE_8X (3 << 18) /** Selects progressive mode rather than interlaced */ # define TV_PROGRESSIVE (1 << 17) /** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ # define TV_PAL_BURST (1 << 16) /** Field for setting delay of Y compared to C */ # define TV_YC_SKEW_MASK (7 << 12) /** Enables a fix for 480p/576p standard definition modes on the 915GM only */ # define TV_ENC_SDP_FIX (1 << 11) /** * Enables a fix for the 915GM only. * * Not sure what it does. */ # define TV_ENC_C0_FIX (1 << 10) /** Bits that must be preserved by software */ # define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) # define TV_FUSE_STATE_MASK (3 << 4) /** Read-only state that reports all features enabled */ # define TV_FUSE_STATE_ENABLED (0 << 4) /** Read-only state that reports that Macrovision is disabled in hardware*/ # define TV_FUSE_STATE_NO_MACROVISION (1 << 4) /** Read-only state that reports that TV-out is disabled in hardware. */ # define TV_FUSE_STATE_DISABLED (2 << 4) /** Normal operation */ # define TV_TEST_MODE_NORMAL (0 << 0) /** Encoder test pattern 1 - combo pattern */ # define TV_TEST_MODE_PATTERN_1 (1 << 0) /** Encoder test pattern 2 - full screen vertical 75% color bars */ # define TV_TEST_MODE_PATTERN_2 (2 << 0) /** Encoder test pattern 3 - full screen horizontal 75% color bars */ # define TV_TEST_MODE_PATTERN_3 (3 << 0) /** Encoder test pattern 4 - random noise */ # define TV_TEST_MODE_PATTERN_4 (4 << 0) /** Encoder test pattern 5 - linear color ramps */ # define TV_TEST_MODE_PATTERN_5 (5 << 0) /** * This test mode forces the DACs to 50% of full output. * * This is used for load detection in combination with TVDAC_SENSE_MASK */ # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) # define TV_TEST_MODE_MASK (7 << 0) #define TV_DAC 0x68004 # define TV_DAC_SAVE 0x00ffff00 /** * Reports that DAC state change logic has reported change (RO). 
* * This gets cleared when TV_DAC_STATE_EN is cleared */ # define TVDAC_STATE_CHG (1 << 31) # define TVDAC_SENSE_MASK (7 << 28) /** Reports that DAC A voltage is above the detect threshold */ # define TVDAC_A_SENSE (1 << 30) /** Reports that DAC B voltage is above the detect threshold */ # define TVDAC_B_SENSE (1 << 29) /** Reports that DAC C voltage is above the detect threshold */ # define TVDAC_C_SENSE (1 << 28) /** * Enables DAC state detection logic, for load-based TV detection. * * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set * to off, for load detection to work. */ # define TVDAC_STATE_CHG_EN (1 << 27) /** Sets the DAC A sense value to high */ # define TVDAC_A_SENSE_CTL (1 << 26) /** Sets the DAC B sense value to high */ # define TVDAC_B_SENSE_CTL (1 << 25) /** Sets the DAC C sense value to high */ # define TVDAC_C_SENSE_CTL (1 << 24) /** Overrides the ENC_ENABLE and DAC voltage levels */ # define DAC_CTL_OVERRIDE (1 << 7) /** Sets the slew rate. Must be preserved in software */ # define ENC_TVDAC_SLEW_FAST (1 << 6) # define DAC_A_1_3_V (0 << 4) # define DAC_A_1_1_V (1 << 4) # define DAC_A_0_7_V (2 << 4) # define DAC_A_MASK (3 << 4) # define DAC_B_1_3_V (0 << 2) # define DAC_B_1_1_V (1 << 2) # define DAC_B_0_7_V (2 << 2) # define DAC_B_MASK (3 << 2) # define DAC_C_1_3_V (0 << 0) # define DAC_C_1_1_V (1 << 0) # define DAC_C_0_7_V (2 << 0) # define DAC_C_MASK (3 << 0) /** * CSC coefficients are stored in a floating point format with 9 bits of * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with * -1 (0x3) being the only legal negative value. */ #define TV_CSC_Y 0x68010 # define TV_RY_MASK 0x07ff0000 # define TV_RY_SHIFT 16 # define TV_GY_MASK 0x00000fff # define TV_GY_SHIFT 0 #define TV_CSC_Y2 0x68014 # define TV_BY_MASK 0x07ff0000 # define TV_BY_SHIFT 16 /** * Y attenuation for component video. * * Stored in 1.9 fixed point. */ # define TV_AY_MASK 0x000003ff # define TV_AY_SHIFT 0 #define TV_CSC_U 0x68018 # define TV_RU_MASK 0x07ff0000 # define TV_RU_SHIFT 16 # define TV_GU_MASK 0x000007ff # define TV_GU_SHIFT 0 #define TV_CSC_U2 0x6801c # define TV_BU_MASK 0x07ff0000 # define TV_BU_SHIFT 16 /** * U attenuation for component video. * * Stored in 1.9 fixed point. */ # define TV_AU_MASK 0x000003ff # define TV_AU_SHIFT 0 #define TV_CSC_V 0x68020 # define TV_RV_MASK 0x0fff0000 # define TV_RV_SHIFT 16 # define TV_GV_MASK 0x000007ff # define TV_GV_SHIFT 0 #define TV_CSC_V2 0x68024 # define TV_BV_MASK 0x07ff0000 # define TV_BV_SHIFT 16 /** * V attenuation for component video. * * Stored in 1.9 fixed point. 
*/ # define TV_AV_MASK 0x000007ff # define TV_AV_SHIFT 0 #define TV_CLR_KNOBS 0x68028 /** 2s-complement brightness adjustment */ # define TV_BRIGHTNESS_MASK 0xff000000 # define TV_BRIGHTNESS_SHIFT 24 /** Contrast adjustment, as a 2.6 unsigned floating point number */ # define TV_CONTRAST_MASK 0x00ff0000 # define TV_CONTRAST_SHIFT 16 /** Saturation adjustment, as a 2.6 unsigned floating point number */ # define TV_SATURATION_MASK 0x0000ff00 # define TV_SATURATION_SHIFT 8 /** Hue adjustment, as an integer phase angle in degrees */ # define TV_HUE_MASK 0x000000ff # define TV_HUE_SHIFT 0 #define TV_CLR_LEVEL 0x6802c /** Controls the DAC level for black */ # define TV_BLACK_LEVEL_MASK 0x01ff0000 # define TV_BLACK_LEVEL_SHIFT 16 /** Controls the DAC level for blanking */ # define TV_BLANK_LEVEL_MASK 0x000001ff # define TV_BLANK_LEVEL_SHIFT 0 #define TV_H_CTL_1 0x68030 /** Number of pixels in the hsync. */ # define TV_HSYNC_END_MASK 0x1fff0000 # define TV_HSYNC_END_SHIFT 16 /** Total number of pixels minus one in the line (display and blanking). */ # define TV_HTOTAL_MASK 0x00001fff # define TV_HTOTAL_SHIFT 0 #define TV_H_CTL_2 0x68034 /** Enables the colorburst (needed for non-component color) */ # define TV_BURST_ENA (1 << 31) /** Offset of the colorburst from the start of hsync, in pixels minus one. */ # define TV_HBURST_START_SHIFT 16 # define TV_HBURST_START_MASK 0x1fff0000 /** Length of the colorburst */ # define TV_HBURST_LEN_SHIFT 0 # define TV_HBURST_LEN_MASK 0x0001fff #define TV_H_CTL_3 0x68038 /** End of hblank, measured in pixels minus one from start of hsync */ # define TV_HBLANK_END_SHIFT 16 # define TV_HBLANK_END_MASK 0x1fff0000 /** Start of hblank, measured in pixels minus one from start of hsync */ # define TV_HBLANK_START_SHIFT 0 # define TV_HBLANK_START_MASK 0x0001fff #define TV_V_CTL_1 0x6803c /** XXX */ # define TV_NBR_END_SHIFT 16 # define TV_NBR_END_MASK 0x07ff0000 /** XXX */ # define TV_VI_END_F1_SHIFT 8 # define TV_VI_END_F1_MASK 0x00003f00 /** XXX */ # define TV_VI_END_F2_SHIFT 0 # define TV_VI_END_F2_MASK 0x0000003f #define TV_V_CTL_2 0x68040 /** Length of vsync, in half lines */ # define TV_VSYNC_LEN_MASK 0x07ff0000 # define TV_VSYNC_LEN_SHIFT 16 /** Offset of the start of vsync in field 1, measured in one less than the * number of half lines. */ # define TV_VSYNC_START_F1_MASK 0x00007f00 # define TV_VSYNC_START_F1_SHIFT 8 /** * Offset of the start of vsync in field 2, measured in one less than the * number of half lines. */ # define TV_VSYNC_START_F2_MASK 0x0000007f # define TV_VSYNC_START_F2_SHIFT 0 #define TV_V_CTL_3 0x68044 /** Enables generation of the equalization signal */ # define TV_EQUAL_ENA (1 << 31) /** Length of vsync, in half lines */ # define TV_VEQ_LEN_MASK 0x007f0000 # define TV_VEQ_LEN_SHIFT 16 /** Offset of the start of equalization in field 1, measured in one less than * the number of half lines. */ # define TV_VEQ_START_F1_MASK 0x0007f00 # define TV_VEQ_START_F1_SHIFT 8 /** * Offset of the start of equalization in field 2, measured in one less than * the number of half lines. */ # define TV_VEQ_START_F2_MASK 0x000007f # define TV_VEQ_START_F2_SHIFT 0 #define TV_V_CTL_4 0x68048 /** * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. */ # define TV_VBURST_START_F1_MASK 0x003f0000 # define TV_VBURST_START_F1_SHIFT 16 /** * Offset to the end of vertical colorburst, measured in one less than the * number of lines from the start of NBR. 
*/ # define TV_VBURST_END_F1_MASK 0x000000ff # define TV_VBURST_END_F1_SHIFT 0 #define TV_V_CTL_5 0x6804c /** * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. */ # define TV_VBURST_START_F2_MASK 0x003f0000 # define TV_VBURST_START_F2_SHIFT 16 /** * Offset to the end of vertical colorburst, measured in one less than the * number of lines from the start of NBR. */ # define TV_VBURST_END_F2_MASK 0x000000ff # define TV_VBURST_END_F2_SHIFT 0 #define TV_V_CTL_6 0x68050 /** * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. */ # define TV_VBURST_START_F3_MASK 0x003f0000 # define TV_VBURST_START_F3_SHIFT 16 /** * Offset to the end of vertical colorburst, measured in one less than the * number of lines from the start of NBR. */ # define TV_VBURST_END_F3_MASK 0x000000ff # define TV_VBURST_END_F3_SHIFT 0 #define TV_V_CTL_7 0x68054 /** * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. */ # define TV_VBURST_START_F4_MASK 0x003f0000 # define TV_VBURST_START_F4_SHIFT 16 /** * Offset to the end of vertical colorburst, measured in one less than the * number of lines from the start of NBR. */ # define TV_VBURST_END_F4_MASK 0x000000ff # define TV_VBURST_END_F4_SHIFT 0 #define TV_SC_CTL_1 0x68060 /** Turns on the first subcarrier phase generation DDA */ # define TV_SC_DDA1_EN (1 << 31) /** Turns on the first subcarrier phase generation DDA */ # define TV_SC_DDA2_EN (1 << 30) /** Turns on the first subcarrier phase generation DDA */ # define TV_SC_DDA3_EN (1 << 29) /** Sets the subcarrier DDA to reset frequency every other field */ # define TV_SC_RESET_EVERY_2 (0 << 24) /** Sets the subcarrier DDA to reset frequency every fourth field */ # define TV_SC_RESET_EVERY_4 (1 << 24) /** Sets the subcarrier DDA to reset frequency every eighth field */ # define TV_SC_RESET_EVERY_8 (2 << 24) /** Sets the subcarrier DDA to never reset the frequency */ # define TV_SC_RESET_NEVER (3 << 24) /** Sets the peak amplitude of the colorburst.*/ # define TV_BURST_LEVEL_MASK 0x00ff0000 # define TV_BURST_LEVEL_SHIFT 16 /** Sets the increment of the first subcarrier phase generation DDA */ # define TV_SCDDA1_INC_MASK 0x00000fff # define TV_SCDDA1_INC_SHIFT 0 #define TV_SC_CTL_2 0x68064 /** Sets the rollover for the second subcarrier phase generation DDA */ # define TV_SCDDA2_SIZE_MASK 0x7fff0000 # define TV_SCDDA2_SIZE_SHIFT 16 /** Sets the increent of the second subcarrier phase generation DDA */ # define TV_SCDDA2_INC_MASK 0x00007fff # define TV_SCDDA2_INC_SHIFT 0 #define TV_SC_CTL_3 0x68068 /** Sets the rollover for the third subcarrier phase generation DDA */ # define TV_SCDDA3_SIZE_MASK 0x7fff0000 # define TV_SCDDA3_SIZE_SHIFT 16 /** Sets the increent of the third subcarrier phase generation DDA */ # define TV_SCDDA3_INC_MASK 0x00007fff # define TV_SCDDA3_INC_SHIFT 0 #define TV_WIN_POS 0x68070 /** X coordinate of the display from the start of horizontal active */ # define TV_XPOS_MASK 0x1fff0000 # define TV_XPOS_SHIFT 16 /** Y coordinate of the display from the start of vertical active (NBR) */ # define TV_YPOS_MASK 0x00000fff # define TV_YPOS_SHIFT 0 #define TV_WIN_SIZE 0x68074 /** Horizontal size of the display window, measured in pixels*/ # define TV_XSIZE_MASK 0x1fff0000 # define TV_XSIZE_SHIFT 16 /** * Vertical size of the display window, measured in pixels. * * Must be even for interlaced modes. 
*/ # define TV_YSIZE_MASK 0x00000fff # define TV_YSIZE_SHIFT 0 #define TV_FILTER_CTL_1 0x68080 /** * Enables automatic scaling calculation. * * If set, the rest of the registers are ignored, and the calculated values can * be read back from the register. */ # define TV_AUTO_SCALE (1 << 31) /** * Disables the vertical filter. * * This is required on modes more than 1024 pixels wide */ # define TV_V_FILTER_BYPASS (1 << 29) /** Enables adaptive vertical filtering */ # define TV_VADAPT (1 << 28) # define TV_VADAPT_MODE_MASK (3 << 26) /** Selects the least adaptive vertical filtering mode */ # define TV_VADAPT_MODE_LEAST (0 << 26) /** Selects the moderately adaptive vertical filtering mode */ # define TV_VADAPT_MODE_MODERATE (1 << 26) /** Selects the most adaptive vertical filtering mode */ # define TV_VADAPT_MODE_MOST (3 << 26) /** * Sets the horizontal scaling factor. * * This should be the fractional part of the horizontal scaling factor divided * by the oversampling rate. TV_HSCALE should be less than 1, and set to: * * (src width - 1) / ((oversample * dest width) - 1) */ # define TV_HSCALE_FRAC_MASK 0x00003fff # define TV_HSCALE_FRAC_SHIFT 0 #define TV_FILTER_CTL_2 0x68084 /** * Sets the integer part of the 3.15 fixed-point vertical scaling factor. * * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) */ # define TV_VSCALE_INT_MASK 0x00038000 # define TV_VSCALE_INT_SHIFT 15 /** * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. * * \sa TV_VSCALE_INT_MASK */ # define TV_VSCALE_FRAC_MASK 0x00007fff # define TV_VSCALE_FRAC_SHIFT 0 #define TV_FILTER_CTL_3 0x68088 /** * Sets the integer part of the 3.15 fixed-point vertical scaling factor. * * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) * * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. */ # define TV_VSCALE_IP_INT_MASK 0x00038000 # define TV_VSCALE_IP_INT_SHIFT 15 /** * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. * * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. * * \sa TV_VSCALE_IP_INT_MASK */ # define TV_VSCALE_IP_FRAC_MASK 0x00007fff # define TV_VSCALE_IP_FRAC_SHIFT 0 #define TV_CC_CONTROL 0x68090 # define TV_CC_ENABLE (1 << 31) /** * Specifies which field to send the CC data in. * * CC data is usually sent in field 0. */ # define TV_CC_FID_MASK (1 << 27) # define TV_CC_FID_SHIFT 27 /** Sets the horizontal position of the CC data. Usually 135. */ # define TV_CC_HOFF_MASK 0x03ff0000 # define TV_CC_HOFF_SHIFT 16 /** Sets the vertical position of the CC data. Usually 21 */ # define TV_CC_LINE_MASK 0x0000003f # define TV_CC_LINE_SHIFT 0 #define TV_CC_DATA 0x68094 # define TV_CC_RDY (1 << 31) /** Second word of CC data to be transmitted. */ # define TV_CC_DATA_2_MASK 0x007f0000 # define TV_CC_DATA_2_SHIFT 16 /** First word of CC data to be transmitted. 
*/ # define TV_CC_DATA_1_MASK 0x0000007f # define TV_CC_DATA_1_SHIFT 0 #define TV_H_LUMA_0 0x68100 #define TV_H_LUMA_59 0x681ec #define TV_H_CHROMA_0 0x68200 #define TV_H_CHROMA_59 0x682ec #define TV_V_LUMA_0 0x68300 #define TV_V_LUMA_42 0x683a8 #define TV_V_CHROMA_0 0x68400 #define TV_V_CHROMA_42 0x684a8 /* Display Port */ #define DP_A 0x64000 /* eDP */ #define DP_B 0x64100 #define DP_C 0x64200 #define DP_D 0x64300 #define DP_PORT_EN (1 << 31) #define DP_PIPEB_SELECT (1 << 30) #define DP_PIPE_MASK (1 << 30) /* Link training mode - select a suitable mode for each stage */ #define DP_LINK_TRAIN_PAT_1 (0 << 28) #define DP_LINK_TRAIN_PAT_2 (1 << 28) #define DP_LINK_TRAIN_PAT_IDLE (2 << 28) #define DP_LINK_TRAIN_OFF (3 << 28) #define DP_LINK_TRAIN_MASK (3 << 28) #define DP_LINK_TRAIN_SHIFT 28 /* CPT Link training mode */ #define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) #define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) #define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) #define DP_LINK_TRAIN_OFF_CPT (3 << 8) #define DP_LINK_TRAIN_MASK_CPT (7 << 8) #define DP_LINK_TRAIN_SHIFT_CPT 8 /* Signal voltages. These are mostly controlled by the other end */ #define DP_VOLTAGE_0_4 (0 << 25) #define DP_VOLTAGE_0_6 (1 << 25) #define DP_VOLTAGE_0_8 (2 << 25) #define DP_VOLTAGE_1_2 (3 << 25) #define DP_VOLTAGE_MASK (7 << 25) #define DP_VOLTAGE_SHIFT 25 /* Signal pre-emphasis levels, like voltages, the other end tells us what * they want */ #define DP_PRE_EMPHASIS_0 (0 << 22) #define DP_PRE_EMPHASIS_3_5 (1 << 22) #define DP_PRE_EMPHASIS_6 (2 << 22) #define DP_PRE_EMPHASIS_9_5 (3 << 22) #define DP_PRE_EMPHASIS_MASK (7 << 22) #define DP_PRE_EMPHASIS_SHIFT 22 /* How many wires to use. I guess 3 was too hard */ #define DP_PORT_WIDTH_1 (0 << 19) #define DP_PORT_WIDTH_2 (1 << 19) #define DP_PORT_WIDTH_4 (3 << 19) #define DP_PORT_WIDTH_MASK (7 << 19) /* Mystic DPCD version 1.1 special mode */ #define DP_ENHANCED_FRAMING (1 << 18) /* eDP */ #define DP_PLL_FREQ_270MHZ (0 << 16) #define DP_PLL_FREQ_160MHZ (1 << 16) #define DP_PLL_FREQ_MASK (3 << 16) /** locked once port is enabled */ #define DP_PORT_REVERSAL (1 << 15) /* eDP */ #define DP_PLL_ENABLE (1 << 14) /** sends the clock on lane 15 of the PEG for debug */ #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) #define DP_SCRAMBLING_DISABLE (1 << 12) #define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) /** limit RGB values to avoid confusing TVs */ #define DP_COLOR_RANGE_16_235 (1 << 8) /** Turn on the audio link */ #define DP_AUDIO_OUTPUT_ENABLE (1 << 6) /** vs and hs sync polarity */ #define DP_SYNC_VS_HIGH (1 << 4) #define DP_SYNC_HS_HIGH (1 << 3) /** A fantasy */ #define DP_DETECTED (1 << 2) /** The aux channel provides a way to talk to the * signal sink for DDC etc. 
Max packet size supported * is 20 bytes in each direction, hence the 5 fixed * data registers */ #define DPA_AUX_CH_CTL 0x64010 #define DPA_AUX_CH_DATA1 0x64014 #define DPA_AUX_CH_DATA2 0x64018 #define DPA_AUX_CH_DATA3 0x6401c #define DPA_AUX_CH_DATA4 0x64020 #define DPA_AUX_CH_DATA5 0x64024 #define DPB_AUX_CH_CTL 0x64110 #define DPB_AUX_CH_DATA1 0x64114 #define DPB_AUX_CH_DATA2 0x64118 #define DPB_AUX_CH_DATA3 0x6411c #define DPB_AUX_CH_DATA4 0x64120 #define DPB_AUX_CH_DATA5 0x64124 #define DPC_AUX_CH_CTL 0x64210 #define DPC_AUX_CH_DATA1 0x64214 #define DPC_AUX_CH_DATA2 0x64218 #define DPC_AUX_CH_DATA3 0x6421c #define DPC_AUX_CH_DATA4 0x64220 #define DPC_AUX_CH_DATA5 0x64224 #define DPD_AUX_CH_CTL 0x64310 #define DPD_AUX_CH_DATA1 0x64314 #define DPD_AUX_CH_DATA2 0x64318 #define DPD_AUX_CH_DATA3 0x6431c #define DPD_AUX_CH_DATA4 0x64320 #define DPD_AUX_CH_DATA5 0x64324 #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) #define DP_AUX_CH_CTL_DONE (1 << 30) #define DP_AUX_CH_CTL_INTERRUPT (1 << 29) #define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) #define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) #define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) #define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) #define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) #define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) #define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 #define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) #define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 #define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) #define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) #define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) #define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) #define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) #define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) #define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 /* * Computing GMCH M and N values for the Display Port link * * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes * * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) * * The GMCH value is used internally * * bytes_per_pixel is the number of bytes coming out of the plane, * which is after the LUTs, so we want the bytes for our color format. * For our current usage, this is always 3, one byte for R, G and B. */ #define _PIPEA_GMCH_DATA_M 0x70050 #define _PIPEB_GMCH_DATA_M 0x71050 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ #define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) #define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25 #define PIPE_GMCH_DATA_M_MASK (0xffffff) #define _PIPEA_GMCH_DATA_N 0x70054 #define _PIPEB_GMCH_DATA_N 0x71054 #define PIPE_GMCH_DATA_N_MASK (0xffffff) /* * Computing Link M and N values for the Display Port link * * Link M / N = pixel_clock / ls_clk * * (the DP spec calls pixel_clock the 'strm_clk') * * The Link value is transmitted in the Main Stream * Attributes and VB-ID. 
*/ #define _PIPEA_DP_LINK_M 0x70060 #define _PIPEB_DP_LINK_M 0x71060 #define PIPEA_DP_LINK_M_MASK (0xffffff) #define _PIPEA_DP_LINK_N 0x70064 #define _PIPEB_DP_LINK_N 0x71064 #define PIPEA_DP_LINK_N_MASK (0xffffff) #define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) #define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) #define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) #define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) /* Display & cursor control */ /* Pipe A */ #define _PIPEADSL 0x70000 -#define DSL_LINEMASK 0x00000fff +#define DSL_LINEMASK_GEN2 0x00000fff +#define DSL_LINEMASK_GEN3 0x00001fff #define _PIPEACONF 0x70008 #define PIPECONF_ENABLE (1<<31) #define PIPECONF_DISABLE 0 #define PIPECONF_DOUBLE_WIDE (1<<30) #define I965_PIPECONF_ACTIVE (1<<30) #define PIPECONF_FRAME_START_DELAY_MASK (3<<27) #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 #define PIPECONF_PIPE_LOCKED (1<<25) #define PIPECONF_PALETTE 0 #define PIPECONF_GAMMA (1<<24) #define PIPECONF_FORCE_BORDER (1<<25) #define PIPECONF_INTERLACE_MASK (7 << 21) /* Note that pre-gen3 does not support interlaced display directly. Panel * fitting must be disabled on pre-ilk for interlaced. */ #define PIPECONF_PROGRESSIVE (0 << 21) #define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */ #define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */ #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ /* Ironlake and later have a complete new set of values for interlaced. PFIT * means panel fitter required, PF means progressive fetch, DBL means power * saving pixel doubling. */ #define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21) #define PIPECONF_INTERLACED_ILK (3 << 21) #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ #define PIPECONF_CXSR_DOWNCLOCK (1<<16) #define PIPECONF_BPP_MASK (0x000000e0) #define PIPECONF_BPP_8 (0<<5) #define PIPECONF_BPP_10 (1<<5) #define PIPECONF_BPP_6 (2<<5) #define PIPECONF_BPP_12 (3<<5) #define PIPECONF_DITHER_EN (1<<4) #define PIPECONF_DITHER_TYPE_MASK (0x0000000c) #define PIPECONF_DITHER_TYPE_SP (0<<2) #define PIPECONF_DITHER_TYPE_ST1 (1<<2) #define PIPECONF_DITHER_TYPE_ST2 (2<<2) #define PIPECONF_DITHER_TYPE_TEMP (3<<2) #define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) +#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) #define PIPE_CRC_ERROR_ENABLE (1UL<<29) #define PIPE_CRC_DONE_ENABLE (1UL<<28) #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) +#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26) #define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) #define PIPE_DPST_EVENT_ENABLE (1UL<<23) +#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26) #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) +#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) +#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) +#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15) #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) #define 
PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) +#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10) #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) #define PIPE_DPST_EVENT_STATUS (1UL<<7) #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) #define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) #define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) #define PIPE_BPC_MASK (7 << 5) /* Ironlake */ #define PIPE_8BPC (0 << 5) #define PIPE_10BPC (1 << 5) #define PIPE_6BPC (2 << 5) #define PIPE_12BPC (3 << 5) #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) #define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) +#define VLV_DPFLIPSTAT 0x70028 +#define PIPEB_LINE_COMPARE_STATUS (1<<29) +#define PIPEB_HLINE_INT_EN (1<<28) +#define PIPEB_VBLANK_INT_EN (1<<27) +#define SPRITED_FLIPDONE_INT_EN (1<<26) +#define SPRITEC_FLIPDONE_INT_EN (1<<25) +#define PLANEB_FLIPDONE_INT_EN (1<<24) +#define PIPEA_LINE_COMPARE_STATUS (1<<21) +#define PIPEA_HLINE_INT_EN (1<<20) +#define PIPEA_VBLANK_INT_EN (1<<19) +#define SPRITEB_FLIPDONE_INT_EN (1<<18) +#define SPRITEA_FLIPDONE_INT_EN (1<<17) +#define PLANEA_FLIPDONE_INT_EN (1<<16) + +#define DPINVGTT 0x7002c /* VLV only */ +#define CURSORB_INVALID_GTT_INT_EN (1<<23) +#define CURSORA_INVALID_GTT_INT_EN (1<<22) +#define SPRITED_INVALID_GTT_INT_EN (1<<21) +#define SPRITEC_INVALID_GTT_INT_EN (1<<20) +#define PLANEB_INVALID_GTT_INT_EN (1<<19) +#define SPRITEB_INVALID_GTT_INT_EN (1<<18) +#define SPRITEA_INVALID_GTT_INT_EN (1<<17) +#define PLANEA_INVALID_GTT_INT_EN (1<<16) +#define DPINVGTT_EN_MASK 0xff0000 +#define CURSORB_INVALID_GTT_STATUS (1<<7) +#define CURSORA_INVALID_GTT_STATUS (1<<6) +#define SPRITED_INVALID_GTT_STATUS (1<<5) +#define SPRITEC_INVALID_GTT_STATUS (1<<4) +#define PLANEB_INVALID_GTT_STATUS (1<<3) +#define SPRITEB_INVALID_GTT_STATUS (1<<2) +#define SPRITEA_INVALID_GTT_STATUS (1<<1) +#define PLANEA_INVALID_GTT_STATUS (1<<0) +#define DPINVGTT_STATUS_MASK 0xff + #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) #define DSPARB_BSTART_SHIFT 0 #define DSPARB_BEND_SHIFT 9 /* on 855 */ #define DSPARB_AEND_SHIFT 0 #define DSPFW1 0x70034 #define DSPFW_SR_SHIFT 23 #define DSPFW_SR_MASK (0x1ff<<23) #define DSPFW_CURSORB_SHIFT 16 #define DSPFW_CURSORB_MASK (0x3f<<16) #define DSPFW_PLANEB_SHIFT 8 #define DSPFW_PLANEB_MASK (0x7f<<8) #define DSPFW_PLANEA_MASK (0x7f) #define DSPFW2 0x70038 #define DSPFW_CURSORA_MASK 0x00003f00 #define DSPFW_CURSORA_SHIFT 8 #define DSPFW_PLANEC_MASK (0x7f) #define DSPFW3 0x7003c #define DSPFW_HPLL_SR_EN (1<<31) #define DSPFW_CURSOR_SR_SHIFT 24 #define PINEVIEW_SELF_REFRESH_EN (1<<30) #define DSPFW_CURSOR_SR_MASK (0x3f<<24) #define DSPFW_HPLL_CURSOR_SHIFT 16 #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) #define DSPFW_HPLL_SR_MASK (0x1ff) +/* drain latency register values*/ +#define DRAIN_LATENCY_PRECISION_32 32 +#define DRAIN_LATENCY_PRECISION_16 16 +#define VLV_DDL1 
0x70050 +#define DDL_CURSORA_PRECISION_32 (1<<31) +#define DDL_CURSORA_PRECISION_16 (0<<31) +#define DDL_CURSORA_SHIFT 24 +#define DDL_PLANEA_PRECISION_32 (1<<7) +#define DDL_PLANEA_PRECISION_16 (0<<7) +#define VLV_DDL2 0x70054 +#define DDL_CURSORB_PRECISION_32 (1<<31) +#define DDL_CURSORB_PRECISION_16 (0<<31) +#define DDL_CURSORB_SHIFT 24 +#define DDL_PLANEB_PRECISION_32 (1<<7) +#define DDL_PLANEB_PRECISION_16 (0<<7) + /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 #define I915_FIFO_LINE_SIZE 64 #define I830_FIFO_LINE_SIZE 32 +#define VALLEYVIEW_FIFO_SIZE 255 #define G4X_FIFO_SIZE 127 #define I965_FIFO_SIZE 512 #define I945_FIFO_SIZE 127 #define I915_FIFO_SIZE 95 #define I855GM_FIFO_SIZE 127 /* In cachelines */ #define I830_FIFO_SIZE 95 +#define VALLEYVIEW_MAX_WM 0xff #define G4X_MAX_WM 0x3f #define I915_MAX_WM 0x3f #define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */ #define PINEVIEW_FIFO_LINE_SIZE 64 #define PINEVIEW_MAX_WM 0x1ff #define PINEVIEW_DFT_WM 0x3f #define PINEVIEW_DFT_HPLLOFF_WM 0 #define PINEVIEW_GUARD_WM 10 #define PINEVIEW_CURSOR_FIFO 64 #define PINEVIEW_CURSOR_MAX_WM 0x3f #define PINEVIEW_CURSOR_DFT_WM 0 #define PINEVIEW_CURSOR_GUARD_WM 5 +#define VALLEYVIEW_CURSOR_MAX_WM 64 #define I965_CURSOR_FIFO 64 #define I965_CURSOR_MAX_WM 32 #define I965_CURSOR_DFT_WM 8 /* define the Watermark register on Ironlake */ #define WM0_PIPEA_ILK 0x45100 #define WM0_PIPE_PLANE_MASK (0x7f<<16) #define WM0_PIPE_PLANE_SHIFT 16 #define WM0_PIPE_SPRITE_MASK (0x3f<<8) #define WM0_PIPE_SPRITE_SHIFT 8 #define WM0_PIPE_CURSOR_MASK (0x1f) #define WM0_PIPEB_ILK 0x45104 #define WM0_PIPEC_IVB 0x45200 #define WM1_LP_ILK 0x45108 #define WM1_LP_SR_EN (1<<31) #define WM1_LP_LATENCY_SHIFT 24 #define WM1_LP_LATENCY_MASK (0x7f<<24) #define WM1_LP_FBC_MASK (0xf<<20) #define WM1_LP_FBC_SHIFT 20 #define WM1_LP_SR_MASK (0x1ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0x3f) #define WM2_LP_ILK 0x4510c #define WM2_LP_EN (1<<31) #define WM3_LP_ILK 0x45110 #define WM3_LP_EN (1<<31) #define WM1S_LP_ILK 0x45120 #define WM2S_LP_IVB 0x45124 #define WM3S_LP_IVB 0x45128 #define WM1S_LP_EN (1<<31) /* Memory latency timer register */ #define MLTR_ILK 0x11222 #define MLTR_WM1_SHIFT 0 #define MLTR_WM2_SHIFT 8 /* the unit of memory self-refresh latency time is 0.5us */ #define ILK_SRLT_MASK 0x3f #define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK) #define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) #define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) /* define the fifo size on Ironlake */ #define ILK_DISPLAY_FIFO 128 #define ILK_DISPLAY_MAXWM 64 #define ILK_DISPLAY_DFTWM 8 #define ILK_CURSOR_FIFO 32 #define ILK_CURSOR_MAXWM 16 #define ILK_CURSOR_DFTWM 8 #define ILK_DISPLAY_SR_FIFO 512 #define ILK_DISPLAY_MAX_SRWM 0x1ff #define ILK_DISPLAY_DFT_SRWM 0x3f #define ILK_CURSOR_SR_FIFO 64 #define ILK_CURSOR_MAX_SRWM 0x3f #define ILK_CURSOR_DFT_SRWM 8 #define ILK_FIFO_LINE_SIZE 64 /* define the WM info on Sandybridge */ #define SNB_DISPLAY_FIFO 128 #define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */ #define SNB_DISPLAY_DFTWM 8 #define SNB_CURSOR_FIFO 32 #define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */ #define SNB_CURSOR_DFTWM 8 #define SNB_DISPLAY_SR_FIFO 512 #define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */ #define SNB_DISPLAY_DFT_SRWM 0x3f #define SNB_CURSOR_SR_FIFO 64 #define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */ #define SNB_CURSOR_DFT_SRWM 8 #define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */ #define SNB_FIFO_LINE_SIZE 64 /* the address where we get all kinds of latency value */ 
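/*
 * Illustrative sketch, not part of the header or of this commit: packing
 * already-computed plane, sprite and cursor watermark levels into
 * WM0_PIPEA_ILK with the field masks defined above.  struct drm_i915_private
 * and I915_WRITE() are assumed from the surrounding driver; the helper name,
 * and the idea that the caller derived the values from the latency macros
 * and FIFO sizes above, are hypothetical.
 */
static void
ilk_write_wm0_pipe_a(struct drm_i915_private *dev_priv, u32 plane_wm,
    u32 sprite_wm, u32 cursor_wm)
{
	u32 val;

	val = ((plane_wm << WM0_PIPE_PLANE_SHIFT) & WM0_PIPE_PLANE_MASK) |
	    ((sprite_wm << WM0_PIPE_SPRITE_SHIFT) & WM0_PIPE_SPRITE_MASK) |
	    (cursor_wm & WM0_PIPE_CURSOR_MASK);
	I915_WRITE(WM0_PIPEA_ILK, val);
}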
#define SSKPD 0x5d10 #define SSKPD_WM_MASK 0x3f #define SSKPD_WM0_SHIFT 0 #define SSKPD_WM1_SHIFT 8 #define SSKPD_WM2_SHIFT 16 #define SSKPD_WM3_SHIFT 24 #define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) #define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) #define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) #define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) #define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. The following code * should work: * * do { * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> * PIPE_FRAME_HIGH_SHIFT; * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> * PIPE_FRAME_LOW_SHIFT); * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> * PIPE_FRAME_HIGH_SHIFT); * } while (high1 != high2); * frame = (high1 << 8) | low1; */ #define _PIPEAFRAMEHIGH 0x70040 #define PIPE_FRAME_HIGH_MASK 0x0000ffff #define PIPE_FRAME_HIGH_SHIFT 0 #define _PIPEAFRAMEPIXEL 0x70044 #define PIPE_FRAME_LOW_MASK 0xff000000 #define PIPE_FRAME_LOW_SHIFT 24 #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 /* GM45+ just has to be different */ #define _PIPEA_FRMCOUNT_GM45 0x70040 #define _PIPEA_FLIPCOUNT_GM45 0x70044 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) /* Cursor A & B regs */ #define _CURACNTR 0x70080 /* Old style CUR*CNTR flags (desktop 8xx) */ #define CURSOR_ENABLE 0x80000000 #define CURSOR_GAMMA_ENABLE 0x40000000 #define CURSOR_STRIDE_MASK 0x30000000 #define CURSOR_FORMAT_SHIFT 24 #define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) /* New style CUR*CNTR flags */ #define CURSOR_MODE 0x27 #define CURSOR_MODE_DISABLE 0x00 #define CURSOR_MODE_64_32B_AX 0x07 #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) #define MCURSOR_PIPE_SELECT (1 << 28) #define MCURSOR_PIPE_A 0x00 #define MCURSOR_PIPE_B (1 << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) #define _CURABASE 0x70084 #define _CURAPOS 0x70088 #define CURSOR_POS_MASK 0x007FF #define CURSOR_POS_SIGN 0x8000 #define CURSOR_X_SHIFT 0 #define CURSOR_Y_SHIFT 16 #define CURSIZE 0x700a0 #define _CURBCNTR 0x700c0 #define _CURBBASE 0x700c4 #define _CURBPOS 0x700c8 #define _CURBCNTR_IVB 0x71080 #define _CURBBASE_IVB 0x71084 #define _CURBPOS_IVB 0x71088 #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) #define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) #define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) /* Display A control */ #define _DSPACNTR 0x70180 #define DISPLAY_PLANE_ENABLE (1<<31) #define DISPLAY_PLANE_DISABLE 0 #define DISPPLANE_GAMMA_ENABLE (1<<30) #define DISPPLANE_GAMMA_DISABLE 0 #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) #define DISPPLANE_8BPP (0x2<<26) #define DISPPLANE_15_16BPP (0x4<<26) #define DISPPLANE_16BPP (0x5<<26) #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) #define DISPPLANE_32BPP (0x7<<26) #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) #define DISPPLANE_STEREO_ENABLE (1<<25) 
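/*
 * Illustrative sketch, not part of the header or of this commit: the
 * frame-counter read loop described in the comment above, expressed with the
 * I915_READ() accessor used elsewhere in the driver instead of INREG().
 * struct drm_i915_private is assumed from the surrounding driver; the helper
 * name is hypothetical.
 */
static u32
i915_pipe_a_frame_count(struct drm_i915_private *dev_priv)
{
	u32 high1, high2, low;

	/* Re-read the high word until it is stable across the low read. */
	do {
		high1 = (I915_READ(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
		    PIPE_FRAME_HIGH_SHIFT;
		low = (I915_READ(_PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
		    PIPE_FRAME_LOW_SHIFT;
		high2 = (I915_READ(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
		    PIPE_FRAME_HIGH_SHIFT;
	} while (high1 != high2);

	return ((high1 << 8) | low);
}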
#define DISPPLANE_STEREO_DISABLE 0 #define DISPPLANE_SEL_PIPE_SHIFT 24 #define DISPPLANE_SEL_PIPE_MASK (3< * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; u32 dpll_reg; /* On IVB, 3rd pipe shares PLL with another one */ if (pipe > 1) return false; if (HAS_PCH_SPLIT(dev)) - dpll_reg = PCH_DPLL(pipe); + dpll_reg = _PCH_DPLL(pipe); else dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); } static void i915_save_palette(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); u32 *array; int i; if (!i915_pipe_enabled(dev, pipe)) return; if (HAS_PCH_SPLIT(dev)) reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) array = dev_priv->save_palette_a; else array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) array[i] = I915_READ(reg + (i << 2)); } static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); u32 *array; int i; if (!i915_pipe_enabled(dev, pipe)) return; if (HAS_PCH_SPLIT(dev)) reg = (pipe == PIPE_A) ? 
_LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) array = dev_priv->save_palette_a; else array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) I915_WRITE(reg + (i << 2), array[i]); } static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE8(index_port, reg); return I915_READ8(data_port); } static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) { struct drm_i915_private *dev_priv = dev->dev_private; I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); return I915_READ8(VGA_AR_DATA_READ); } static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) { struct drm_i915_private *dev_priv = dev->dev_private; I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); I915_WRITE8(VGA_AR_DATA_WRITE, val); } static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE8(index_port, reg); I915_WRITE8(data_port, val); } static void i915_save_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; u16 cr_index, cr_data, st01; /* VGA color palette registers */ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); /* MSR bits */ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; } else { cr_index = VGA_CR_INDEX_MDA; cr_data = VGA_CR_DATA_MDA; st01 = VGA_ST01_MDA; } /* CRT controller regs */ i915_write_indexed(dev, cr_index, cr_data, 0x11, i915_read_indexed(dev, cr_index, cr_data, 0x11) & (~0x80)); for (i = 0; i <= 0x24; i++) dev_priv->saveCR[i] = i915_read_indexed(dev, cr_index, cr_data, i); /* Make sure we don't turn off CR group 0 writes */ dev_priv->saveCR[0x11] &= ~0x80; /* Attribute controller registers */ I915_READ8(st01); dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); for (i = 0; i <= 0x14; i++) dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); I915_READ8(st01); /* Graphics controller registers */ for (i = 0; i < 9; i++) dev_priv->saveGR[i] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); dev_priv->saveGR[0x10] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); dev_priv->saveGR[0x11] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); dev_priv->saveGR[0x18] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); /* Sequencer registers */ for (i = 0; i < 8; i++) dev_priv->saveSR[i] = i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); } static void i915_restore_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; u16 cr_index, cr_data, st01; /* MSR bits */ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; } else { cr_index = VGA_CR_INDEX_MDA; cr_data = VGA_CR_DATA_MDA; st01 = VGA_ST01_MDA; } /* Sequencer registers, don't write SR07 */ for (i = 0; i < 7; i++) i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, dev_priv->saveSR[i]); /* CRT controller regs */ /* Enable CR group 0 writes */ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); for (i = 0; i <= 0x24; i++) i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); /* Graphics controller regs */ for (i = 0; i < 
9; i++) i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, dev_priv->saveGR[i]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, dev_priv->saveGR[0x10]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, dev_priv->saveGR[0x11]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, dev_priv->saveGR[0x18]); /* Attribute controller registers */ I915_READ8(st01); /* switch back to index mode */ for (i = 0; i <= 0x14; i++) i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); I915_READ8(st01); /* switch back to index mode */ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); I915_READ8(st01); /* VGA color palette registers */ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); } static void i915_save_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; /* Cursor state */ dev_priv->saveCURACNTR = I915_READ(_CURACNTR); dev_priv->saveCURAPOS = I915_READ(_CURAPOS); dev_priv->saveCURABASE = I915_READ(_CURABASE); dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); dev_priv->saveCURBPOS = I915_READ(_CURBPOS); dev_priv->saveCURBBASE = I915_READ(_CURBBASE); if (IS_GEN2(dev)) dev_priv->saveCURSIZE = I915_READ(CURSIZE); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); } /* Pipe & plane A info */ dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); if (HAS_PCH_SPLIT(dev)) { dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); } else { dev_priv->saveFPA0 = I915_READ(_FPA0); dev_priv->saveFPA1 = I915_READ(_FPA1); dev_priv->saveDPLL_A = I915_READ(_DPLL_A); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); if (!HAS_PCH_SPLIT(dev)) dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); } dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPASURF = I915_READ(_DSPASURF); dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); } 
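	/*
	 * The pipe A palette and the pipe/plane B state are captured next
	 * with the same sequence.  The matching i915_restore_modeset_reg()
	 * below writes everything back, priming each DPLL with the VCO
	 * enable bit cleared before re-enabling it, and then restores the
	 * mode, plane, palette and cursor state.
	 */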
i915_save_palette(dev, PIPE_A); dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); /* Pipe & plane B info */ dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); if (HAS_PCH_SPLIT(dev)) { dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); } else { dev_priv->saveFPB0 = I915_READ(_FPB0); dev_priv->saveFPB1 = I915_READ(_FPB1); dev_priv->saveDPLL_B = I915_READ(_DPLL_B); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); if (!HAS_PCH_SPLIT(dev)) dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); } dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); } i915_save_palette(dev, PIPE_B); dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: case 6: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); break; case 5: case 4: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); break; case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); case 2: for (i = 0; i < 8; i++) dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); break; } return; } static void i915_restore_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int dpll_a_reg, fpa0_reg, fpa1_reg; int dpll_b_reg, fpb0_reg, fpb1_reg; int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: case 6: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 5: case 4: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 3: case 2: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); for 
(i = 0; i < 8; i++) I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); break; } if (HAS_PCH_SPLIT(dev)) { dpll_a_reg = _PCH_DPLL_A; dpll_b_reg = _PCH_DPLL_B; fpa0_reg = _PCH_FPA0; fpb0_reg = _PCH_FPB0; fpa1_reg = _PCH_FPA1; fpb1_reg = _PCH_FPB1; } else { dpll_a_reg = _DPLL_A; dpll_b_reg = _DPLL_B; fpa0_reg = _FPA0; fpb0_reg = _FPB0; fpa1_reg = _FPA1; fpb1_reg = _FPB1; } if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); } /* Pipe & plane A info */ /* Prime the clock */ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_a_reg); DRM_UDELAY(150); } I915_WRITE(fpa0_reg, dev_priv->saveFPA0); I915_WRITE(fpa1_reg, dev_priv->saveFPA1); /* Actually enable it */ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); POSTING_READ(dpll_a_reg); DRM_UDELAY(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); POSTING_READ(_DPLL_A_MD); } DRM_UDELAY(150); /* Restore mode */ I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); if (!HAS_PCH_SPLIT(dev)) I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); } /* Restore plane info */ I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); } I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); i915_restore_palette(dev, PIPE_A); /* Enable the plane */ I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); /* Pipe & plane B info */ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_b_reg); DRM_UDELAY(150); } I915_WRITE(fpb0_reg, dev_priv->saveFPB0); I915_WRITE(fpb1_reg, dev_priv->saveFPB1); /* Actually enable it */ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); POSTING_READ(dpll_b_reg); DRM_UDELAY(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); POSTING_READ(_DPLL_B_MD); } DRM_UDELAY(150); /* Restore mode */ I915_WRITE(_HTOTAL_B, 
dev_priv->saveHTOTAL_B); I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); if (!HAS_PCH_SPLIT(dev)) I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); } /* Restore plane info */ I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); i915_restore_palette(dev, PIPE_B); /* Enable the plane */ I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); /* Cursor state */ I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); I915_WRITE(_CURABASE, dev_priv->saveCURABASE); I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); if (IS_GEN2(dev)) I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); return; } static void i915_save_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration control */ dev_priv->saveDSPARB = I915_READ(DSPARB); /* This is only meaningful in non-KMS mode */ /* Don't save them in KMS mode */ i915_save_modeset_reg(dev); /* CRT state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->saveADPA = I915_READ(PCH_ADPA); } else { dev_priv->saveADPA = I915_READ(ADPA); } /* LVDS state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); dev_priv->saveLVDS = I915_READ(PCH_LVDS); } else { dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); if (INTEL_INFO(dev)->gen >= 4) dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); if (IS_MOBILE(dev) && !IS_I830(dev)) dev_priv->saveLVDS = I915_READ(LVDS); } if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { 
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); } else { dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); } /* Display Port state */ if (SUPPORTS_INTEGRATED_DP(dev)) { dev_priv->saveDP_B = I915_READ(DP_B); dev_priv->saveDP_C = I915_READ(DP_C); dev_priv->saveDP_D = I915_READ(DP_D); dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); } /* FIXME: save TV & SDVO state */ /* Only save FBC state on the platform that supports FBC */ if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); } else if (IS_GM45(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); } else { dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); } } /* VGA state */ dev_priv->saveVGA0 = I915_READ(VGA0); dev_priv->saveVGA1 = I915_READ(VGA1); dev_priv->saveVGA_PD = I915_READ(VGA_PD); if (HAS_PCH_SPLIT(dev)) dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); else dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); i915_save_vga(dev); } static void i915_restore_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration */ I915_WRITE(DSPARB, dev_priv->saveDSPARB); /* Display port ratios (must be done before clock is set) */ if (SUPPORTS_INTEGRATED_DP(dev)) { I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } /* This is only meaningful in non-KMS mode */ /* Don't restore them in KMS mode */ i915_restore_modeset_reg(dev); /* CRT state */ if (HAS_PCH_SPLIT(dev)) I915_WRITE(PCH_ADPA, dev_priv->saveADPA); else I915_WRITE(ADPA, dev_priv->saveADPA); /* LVDS state */ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); } else if (IS_MOBILE(dev) && !IS_I830(dev)) I915_WRITE(LVDS, dev_priv->saveLVDS); if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); 
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); I915_WRITE(RSTDBYCTL, dev_priv->saveMCHBAR_RENDER_STANDBY); } else { I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); } /* Display Port state */ if (SUPPORTS_INTEGRATED_DP(dev)) { I915_WRITE(DP_B, dev_priv->saveDP_B); I915_WRITE(DP_C, dev_priv->saveDP_C); I915_WRITE(DP_D, dev_priv->saveDP_D); } /* FIXME: restore TV & SDVO state */ /* only restore FBC info on the platform that supports FBC*/ intel_disable_fbc(dev); if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else { I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); } } /* VGA state */ if (HAS_PCH_SPLIT(dev)) I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); else I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); I915_WRITE(VGA0, dev_priv->saveVGA0); I915_WRITE(VGA1, dev_priv->saveVGA1); I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); POSTING_READ(VGA_PD); DRM_UDELAY(150); i915_restore_vga(dev); } int i915_save_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; dev_priv->saveLBB = pci_read_config(dev->device, LBB, 1); /* Hardware status page */ dev_priv->saveHWS = I915_READ(HWS_PGA); i915_save_display(dev); /* Interrupt state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->saveDEIER = I915_READ(DEIER); dev_priv->saveDEIMR = I915_READ(DEIMR); dev_priv->saveGTIER = I915_READ(GTIER); dev_priv->saveGTIMR = I915_READ(GTIMR); dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); dev_priv->saveMCHBAR_RENDER_STANDBY = I915_READ(RSTDBYCTL); dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); } else { dev_priv->saveIER = I915_READ(IER); dev_priv->saveIMR = I915_READ(IMR); } if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); if (INTEL_INFO(dev)->gen >= 6) gen6_disable_rps(dev); /* Cache mode state */ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); /* Memory Arbitration state */ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); /* Scratch space */ for (i = 0; i < 16; i++) { dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); } for (i = 0; i < 3; i++) dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); return 0; } int i915_restore_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1); /* Hardware status page */ I915_WRITE(HWS_PGA, dev_priv->saveHWS); i915_restore_display(dev); /* Interrupt state */ if (HAS_PCH_SPLIT(dev)) { I915_WRITE(DEIER, dev_priv->saveDEIER); I915_WRITE(DEIMR, dev_priv->saveDEIMR); I915_WRITE(GTIER, dev_priv->saveGTIER); I915_WRITE(GTIMR, dev_priv->saveGTIMR); I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); I915_WRITE(PCH_PORT_HOTPLUG, 
dev_priv->savePCH_PORT_HOTPLUG); } else { I915_WRITE(IER, dev_priv->saveIER); I915_WRITE(IMR, dev_priv->saveIMR); } - DRM_UNLOCK(dev); - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - intel_init_clock_gating(dev); - - if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); - intel_init_emon(dev); - } - - if (INTEL_INFO(dev)->gen >= 6) { - gen6_enable_rps(dev_priv); - gen6_update_ring_freq(dev_priv); - } - - DRM_LOCK(dev); /* Cache mode state */ I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); /* Memory arbitration state */ I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); for (i = 0; i < 16; i++) { I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); } for (i = 0; i < 3; i++) I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); intel_iic_reset(dev); return 0; } Index: head/sys/dev/drm2/i915/intel_bios.c =================================================================== --- head/sys/dev/drm2/i915/intel_bios.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_bios.c (revision 277487) @@ -1,737 +1,778 @@ /* * Copyright © 2006 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * Authors: * Eric Anholt * * $FreeBSD$ */ #include #include #include #include #include #include #define SLAVE_ADDR1 0x70 #define SLAVE_ADDR2 0x72 static int panel_type; static void * find_section(struct bdb_header *bdb, int section_id) { u8 *base = (u8 *)bdb; int index = 0; u16 total, current_size; u8 current_id; /* skip to first section */ index += bdb->header_size; total = bdb->bdb_size; /* walk the sections looking for section_id */ while (index < total) { current_id = *(base + index); index++; current_size = *((u16 *)(base + index)); index += 2; if (current_id == section_id) return base + index; index += current_size; } return NULL; } static u16 get_blocksize(void *p) { u16 *block_ptr, block_size; block_ptr = (u16 *)((char *)p - 2); block_size = *block_ptr; return block_size; } static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, const struct lvds_dvo_timing *dvo_timing) { panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | dvo_timing->hactive_lo; panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + dvo_timing->hsync_pulse_width; panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | dvo_timing->vactive_lo; panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + dvo_timing->vsync_off; panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + dvo_timing->vsync_pulse_width; panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); panel_fixed_mode->clock = dvo_timing->clock * 10; panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; if (dvo_timing->hsync_positive) panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; else panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; if (dvo_timing->vsync_positive) panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; drm_mode_set_name(panel_fixed_mode); } static bool lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a, const struct lvds_dvo_timing *b) { if (a->hactive_hi != b->hactive_hi || a->hactive_lo != b->hactive_lo) return false; if (a->hsync_off_hi != b->hsync_off_hi || a->hsync_off_lo != b->hsync_off_lo) return false; if (a->hsync_pulse_width != b->hsync_pulse_width) return false; if (a->hblank_hi != b->hblank_hi || a->hblank_lo != b->hblank_lo) return false; if (a->vactive_hi != b->vactive_hi || a->vactive_lo != b->vactive_lo) return false; if (a->vsync_off != b->vsync_off) return false; if (a->vsync_pulse_width != b->vsync_pulse_width) return false; if (a->vblank_hi != b->vblank_hi || a->vblank_lo != b->vblank_lo) return false; return true; } static const struct lvds_dvo_timing * get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs, int index) { /* * the size of fp_timing varies on the different platform. 
* So calculate the DVO timing relative offset in LVDS data * entry to get the DVO timing entry */ int lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; int dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; const char *entry = (const char *)lvds_lfp_data->data + lfp_data_size * index; return (const struct lvds_dvo_timing *)(entry + dvo_timing_offset); } +/* get lvds_fp_timing entry + * this function may return NULL if the corresponding entry is invalid + */ +static const struct lvds_fp_timing * +get_lvds_fp_timing(const struct bdb_header *bdb, + const struct bdb_lvds_lfp_data *data, + const struct bdb_lvds_lfp_data_ptrs *ptrs, + int index) +{ + size_t data_ofs = (const u8 *)data - (const u8 *)bdb; + u16 data_size = ((const u16 *)data)[-1]; /* stored in header */ + size_t ofs; + + if (index >= DRM_ARRAY_SIZE(ptrs->ptr)) + return NULL; + ofs = ptrs->ptr[index].fp_timing_offset; + if (ofs < data_ofs || + ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size) + return NULL; + return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs); +} + /* Try to find integrated panel data */ static void parse_lfp_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { const struct bdb_lvds_options *lvds_options; const struct bdb_lvds_lfp_data *lvds_lfp_data; const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; const struct lvds_dvo_timing *panel_dvo_timing; + const struct lvds_fp_timing *fp_timing; struct drm_display_mode *panel_fixed_mode; int i, downclock; lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); if (!lvds_options) return; dev_priv->lvds_dither = lvds_options->pixel_dither; if (lvds_options->panel_type == 0xff) return; panel_type = lvds_options->panel_type; lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); if (!lvds_lfp_data) return; lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); if (!lvds_lfp_data_ptrs) return; dev_priv->lvds_vbt = 1; panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, lvds_options->panel_type); panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), DRM_MEM_KMS, M_WAITOK | M_ZERO); fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); /* * Iterate over the LVDS panel timing info to find the lowest clock * for the native resolution. */ downclock = panel_dvo_timing->clock; for (i = 0; i < 16; i++) { const struct lvds_dvo_timing *dvo_timing; dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, i); if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) && dvo_timing->clock < downclock) downclock = dvo_timing->clock; } if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) { dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = downclock * 10; DRM_DEBUG("LVDS downclock is found in VBT. 
" "Normal Clock %dKHz, downclock %dKHz\n", panel_fixed_mode->clock, 10 * downclock); } + + fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, + lvds_lfp_data_ptrs, + lvds_options->panel_type); + if (fp_timing) { + /* check the resolution, just to be sure */ + if (fp_timing->x_res == panel_fixed_mode->hdisplay && + fp_timing->y_res == panel_fixed_mode->vdisplay) { + dev_priv->bios_lvds_val = fp_timing->lvds_reg_val; + DRM_DEBUG_KMS("VBT initial LVDS value %x\n", + dev_priv->bios_lvds_val); + } + } } /* Try to find sdvo panel data */ static void parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct lvds_dvo_timing *dvo_timing; struct drm_display_mode *panel_fixed_mode; int index; index = i915_vbt_sdvo_panel_type; + if (index == -2) { + DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); + return; + } + if (index == -1) { struct bdb_sdvo_lvds_options *sdvo_lvds_options; sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); if (!sdvo_lvds_options) return; index = sdvo_lvds_options->panel_type; } dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); if (!dvo_timing) return; panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), DRM_MEM_KMS, M_WAITOK | M_ZERO); fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); } static int intel_bios_ssc_frequency(struct drm_device *dev, bool alternate) { switch (INTEL_INFO(dev)->gen) { case 2: return alternate ? 66 : 48; case 3: case 4: return alternate ? 100 : 96; default: return alternate ? 100 : 120; } } static void parse_general_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct drm_device *dev = dev_priv->dev; struct bdb_general_features *general; general = find_section(bdb, BDB_GENERAL_FEATURES); if (general) { dev_priv->int_tv_support = general->int_tv_support; dev_priv->int_crt_support = general->int_crt_support; dev_priv->lvds_use_ssc = general->enable_ssc; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, general->ssc_freq); dev_priv->display_clock_mode = general->display_clock_mode; DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n", dev_priv->int_tv_support, dev_priv->int_crt_support, dev_priv->lvds_use_ssc, dev_priv->lvds_ssc_freq, dev_priv->display_clock_mode); } } static void parse_general_definitions(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_general_definitions *general; general = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (general) { u16 block_size = get_blocksize(general); if (block_size >= sizeof(*general)) { int bus_pin = general->crt_ddc_gmbus_pin; DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); - if (bus_pin >= 1 && bus_pin <= 6) + if (intel_gmbus_is_port_valid(bus_pin)) dev_priv->crt_ddc_pin = bus_pin; } else { DRM_DEBUG_KMS("BDB_GD too small (%d). 
Invalid.\n", - block_size); + block_size); } } } static void parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct sdvo_device_mapping *p_mapping; struct bdb_general_definitions *p_defs; struct child_device_config *p_child; int i, child_device_num, count; u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); return; } /* judge whether the size of child device meets the requirements. * If the child device size obtained from general definition block * is different with sizeof(struct child_device_config), skip the * parsing of sdvo device info */ if (p_defs->child_dev_size != sizeof(*p_child)) { /* different child dev size . Ignore it */ DRM_DEBUG_KMS("different child size is found. Invalid.\n"); return; } /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ child_device_num = (block_size - sizeof(*p_defs)) / sizeof(*p_child); count = 0; for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } if (p_child->slave_addr != SLAVE_ADDR1 && p_child->slave_addr != SLAVE_ADDR2) { /* * If the slave address is neither 0x70 nor 0x72, * it is not a SDVO device. Skip it. */ continue; } if (p_child->dvo_port != DEVICE_PORT_DVOB && p_child->dvo_port != DEVICE_PORT_DVOC) { /* skip the incorrect SDVO port */ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); continue; } DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" " %s port\n", p_child->slave_addr, (p_child->dvo_port == DEVICE_PORT_DVOB) ? "SDVOB" : "SDVOC"); p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); if (!p_mapping->initialized) { p_mapping->dvo_port = p_child->dvo_port; p_mapping->slave_addr = p_child->slave_addr; p_mapping->dvo_wiring = p_child->dvo_wiring; p_mapping->ddc_pin = p_child->ddc_pin; p_mapping->i2c_pin = p_child->i2c_pin; p_mapping->initialized = 1; DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", p_mapping->dvo_port, p_mapping->slave_addr, p_mapping->dvo_wiring, p_mapping->ddc_pin, p_mapping->i2c_pin); } else { DRM_DEBUG_KMS("Maybe one SDVO port is shared by " "two SDVO device.\n"); } if (p_child->slave2_addr) { /* Maybe this is a SDVO device with multiple inputs */ /* And the mapping info is not added */ DRM_DEBUG_KMS("there exists the slave2_addr. 
Maybe this" " is a SDVO device with multiple inputs.\n"); } count++; } if (!count) { /* No SDVO device info is found */ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n"); } return; } static void parse_driver_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct drm_device *dev = dev_priv->dev; struct bdb_driver_features *driver; driver = find_section(bdb, BDB_DRIVER_FEATURES); if (!driver) return; if (SUPPORTS_EDP(dev) && driver->lvds_config == BDB_DRIVER_FEATURE_EDP) dev_priv->edp.support = 1; if (driver->dual_frequency) dev_priv->render_reclock_avail = true; } static void parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_edp *edp; struct edp_power_seq *edp_pps; struct edp_link_params *edp_link_params; edp = find_section(bdb, BDB_EDP); if (!edp) { if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { DRM_DEBUG_KMS("No eDP BDB found but eDP panel " "supported, assume %dbpp panel color " "depth.\n", dev_priv->edp.bpp); } return; } switch ((edp->color_depth >> (panel_type * 2)) & 3) { case EDP_18BPP: dev_priv->edp.bpp = 18; break; case EDP_24BPP: dev_priv->edp.bpp = 24; break; case EDP_30BPP: dev_priv->edp.bpp = 30; break; } /* Get the eDP sequencing and link info */ edp_pps = &edp->power_seqs[panel_type]; edp_link_params = &edp->link_params[panel_type]; dev_priv->edp.pps = *edp_pps; dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : DP_LINK_BW_1_62; switch (edp_link_params->lanes) { case 0: dev_priv->edp.lanes = 1; break; case 1: dev_priv->edp.lanes = 2; break; case 3: default: dev_priv->edp.lanes = 4; break; } switch (edp_link_params->preemphasis) { case 0: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; break; case 1: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; break; case 2: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; break; case 3: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; break; } switch (edp_link_params->vswing) { case 0: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; break; case 1: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; break; case 2: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; break; case 3: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; break; } } static void parse_device_mapping(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_general_definitions *p_defs; struct child_device_config *p_child, *child_dev_ptr; int i, child_device_num, count; u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } /* judge whether the size of child device meets the requirements. * If the child device size obtained from general definition block * is different with sizeof(struct child_device_config), skip the * parsing of sdvo device info */ if (p_defs->child_dev_size != sizeof(*p_child)) { /* different child dev size . Ignore it */ DRM_DEBUG_KMS("different child size is found. 
Invalid.\n"); return; } /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ child_device_num = (block_size - sizeof(*p_defs)) / sizeof(*p_child); count = 0; /* get the number of child device that is present */ for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } count++; } if (!count) { DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); return; } dev_priv->child_dev = malloc(sizeof(*p_child) * count, DRM_MEM_KMS, M_WAITOK | M_ZERO); dev_priv->child_dev_num = count; count = 0; for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } child_dev_ptr = dev_priv->child_dev + count; count++; memcpy((void *)child_dev_ptr, (void *)p_child, sizeof(*p_child)); } return; } static void init_vbt_defaults(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; /* LFP panel data */ dev_priv->lvds_dither = 1; dev_priv->lvds_vbt = 0; /* SDVO panel data */ dev_priv->sdvo_lvds_vbt_mode = NULL; /* general features */ dev_priv->int_tv_support = 1; dev_priv->int_crt_support = 1; /* Default to using SSC */ dev_priv->lvds_use_ssc = 1; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); /* eDP data */ dev_priv->edp.bpp = 18; } static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) { DRM_DEBUG_KMS("Falling back to manually reading VBT from " "VBIOS ROM for %s\n", id->ident); return 1; } static const struct dmi_system_id intel_no_opregion_vbt[] = { { .callback = intel_no_opregion_vbt_callback, .ident = "ThinkCentre A57", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), }, }, { } }; /** * intel_parse_bios - find VBT and initialize settings from the BIOS * @dev: DRM device * * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers * to appropriate values. * * Returns 0 on success, nonzero on failure. */ bool intel_parse_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct bdb_header *bdb = NULL; u8 *bios; init_vbt_defaults(dev_priv); /* XXX Should this validation be moved to intel_opregion.c? 
*/ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { struct vbt_header *vbt = dev_priv->opregion.vbt; if (memcmp(vbt->signature, "$VBT", 4) == 0) { DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", vbt->signature); bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); } else dev_priv->opregion.vbt = NULL; } bios = NULL; #if 1 if (bdb == NULL) { KIB_NOTYET(); return (-1); } #else if (bdb == NULL) { struct vbt_header *vbt = NULL; size_t size; int i; bios = pci_map_rom(pdev, &size); if (!bios) return -1; /* Scour memory looking for the VBT signature */ for (i = 0; i + 4 < size; i++) { if (!memcmp(bios + i, "$VBT", 4)) { vbt = (struct vbt_header *)(bios + i); break; } } if (!vbt) { DRM_DEBUG_DRIVER("VBT signature missing\n"); pci_unmap_rom(pdev, bios); return -1; } bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); } #endif /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); parse_general_definitions(dev_priv, bdb); parse_lfp_panel_data(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); parse_sdvo_device_mapping(dev_priv, bdb); parse_device_mapping(dev_priv, bdb); parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); #if 0 if (bios) pci_unmap_rom(pdev, bios); #endif return 0; } /* Ensure that vital registers have been initialised, even if the BIOS * is absent or just failing to do its job. */ void intel_setup_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Set the Panel Power On/Off timings if uninitialized. */ if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { /* Set T2 to 40ms and T5 to 200ms */ I915_WRITE(PP_ON_DELAYS, 0x019007d0); /* Set T3 to 35ms and Tx to 200ms */ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); } } Index: head/sys/dev/drm2/i915/intel_crt.c =================================================================== --- head/sys/dev/drm2/i915/intel_crt.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_crt.c (revision 277487) @@ -1,624 +1,662 @@ /* * Copyright © 2006-2007 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include /* Here's the desired hotplug mode */ #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ ADPA_CRT_HOTPLUG_WARMUP_10MS | \ ADPA_CRT_HOTPLUG_SAMPLE_4S | \ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \ ADPA_CRT_HOTPLUG_VOLREF_325MV | \ ADPA_CRT_HOTPLUG_ENABLE) struct intel_crt { struct intel_encoder base; bool force_hotplug_required; }; static struct intel_crt *intel_attached_crt(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_crt, base); } -static void intel_crt_dpms(struct drm_encoder *encoder, int mode) +static void pch_crt_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 temp, reg; + u32 temp; - if (HAS_PCH_SPLIT(dev)) - reg = PCH_ADPA; - else - reg = ADPA; + temp = I915_READ(PCH_ADPA); + temp &= ~ADPA_DAC_ENABLE; - temp = I915_READ(reg); + switch (mode) { + case DRM_MODE_DPMS_ON: + temp |= ADPA_DAC_ENABLE; + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + /* Just leave port enable cleared */ + break; + } + + I915_WRITE(PCH_ADPA, temp); +} + +static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 temp; + + temp = I915_READ(ADPA); temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); temp &= ~ADPA_DAC_ENABLE; switch (mode) { case DRM_MODE_DPMS_ON: temp |= ADPA_DAC_ENABLE; break; case DRM_MODE_DPMS_STANDBY: temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE; break; case DRM_MODE_DPMS_SUSPEND: temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE; break; case DRM_MODE_DPMS_OFF: temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; break; } - I915_WRITE(reg, temp); + I915_WRITE(ADPA, temp); } static int intel_crt_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; int max_clock = 0; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (mode->clock < 25000) return MODE_CLOCK_LOW; if (IS_GEN2(dev)) max_clock = 350000; else max_clock = 400000; if (mode->clock > max_clock) return MODE_CLOCK_HIGH; return MODE_OK; } static bool intel_crt_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, + struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static void intel_crt_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = dev->dev_private; int dpll_md_reg; u32 adpa, dpll_md; u32 adpa_reg; dpll_md_reg = DPLL_MD(intel_crtc->pipe); if (HAS_PCH_SPLIT(dev)) adpa_reg = PCH_ADPA; else adpa_reg = ADPA; /* * Disable separate mode multiplier used when cloning SDVO to CRT * XXX this needs to be adjusted when we really are cloning */ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { dpll_md = I915_READ(dpll_md_reg); I915_WRITE(dpll_md_reg, dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); } adpa = ADPA_HOTPLUG_BITS; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) adpa |= ADPA_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) adpa |= ADPA_VSYNC_ACTIVE_HIGH; /* For CPT allow 3 pipe config, for 
others just use A or B */ if (HAS_PCH_CPT(dev)) adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); else if (intel_crtc->pipe == 0) adpa |= ADPA_PIPE_A_SELECT; else adpa |= ADPA_PIPE_B_SELECT; if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); I915_WRITE(adpa_reg, adpa); } static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); struct drm_i915_private *dev_priv = dev->dev_private; u32 adpa; bool ret; /* The first time through, trigger an explicit detection cycle */ if (crt->force_hotplug_required) { bool turn_off_dac = HAS_PCH_SPLIT(dev); u32 save_adpa; crt->force_hotplug_required = 0; save_adpa = adpa = I915_READ(PCH_ADPA); DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; if (turn_off_dac) adpa &= ~ADPA_DAC_ENABLE; I915_WRITE(PCH_ADPA, adpa); if (_intel_wait_for(dev, (I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 1000, 1, "915crt")) DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER\n"); if (turn_off_dac) { I915_WRITE(PCH_ADPA, save_adpa); POSTING_READ(PCH_ADPA); } } /* Check the status to see if both blue and green are on now */ adpa = I915_READ(PCH_ADPA); if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); return ret; } /** * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. * * Not for i915G/i915GM * * \return true if CRT is connected. * \return false if CRT is disconnected. */ static bool intel_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_en, orig, stat; bool ret = false; int i, tries = 0; if (HAS_PCH_SPLIT(dev)) return intel_ironlake_crt_detect_hotplug(connector); /* * On 4 series desktop, CRT detect sequence need to be done twice * to get a reliable result. */ if (IS_G4X(dev) && !IS_GM45(dev)) tries = 2; else tries = 1; hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; for (i = 0; i < tries ; i++) { /* turn on the FORCE_DETECT */ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); /* wait for FORCE_DETECT to go off */ if (_intel_wait_for(dev, (I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT) == 0, 1000, 1, "915cr2")) DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); } stat = I915_READ(PORT_HOTPLUG_STAT); if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) ret = true; /* clear the interrupt we just generated, if any */ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); /* and put the bits back */ I915_WRITE(PORT_HOTPLUG_EN, orig); return ret; } static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(connector); struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; /* CRT should always be at 0, but check anyway */ if (crt->base.type != INTEL_OUTPUT_ANALOG) return false; if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { struct edid *edid; bool is_digital = false; + device_t iic; - edid = drm_get_edid(connector, - dev_priv->gmbus[dev_priv->crt_ddc_pin]); + iic = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); + edid = drm_get_edid(connector, iic); /* * This may be a DVI-I connector with a shared DDC * link between analog and digital outputs, so we * have to check the EDID input spec of the attached device. 
* * On the other hand, what should we do if it is a broken EDID? */ if (edid != NULL) { is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } if (!is_digital) { DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); return true; } else { DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } } return false; } static enum drm_connector_status intel_crt_load_detect(struct intel_crt *crt) { struct drm_device *dev = crt->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe; uint32_t save_bclrpat; uint32_t save_vtotal; uint32_t vtotal, vactive; uint32_t vsample; uint32_t vblank, vblank_start, vblank_end; uint32_t dsl; uint32_t bclrpat_reg; uint32_t vtotal_reg; uint32_t vblank_reg; uint32_t vsync_reg; uint32_t pipeconf_reg; uint32_t pipe_dsl_reg; uint8_t st00; enum drm_connector_status status; DRM_DEBUG_KMS("starting load-detect on CRT\n"); bclrpat_reg = BCLRPAT(pipe); vtotal_reg = VTOTAL(pipe); vblank_reg = VBLANK(pipe); vsync_reg = VSYNC(pipe); pipeconf_reg = PIPECONF(pipe); pipe_dsl_reg = PIPEDSL(pipe); save_bclrpat = I915_READ(bclrpat_reg); save_vtotal = I915_READ(vtotal_reg); vblank = I915_READ(vblank_reg); vtotal = ((save_vtotal >> 16) & 0xfff) + 1; vactive = (save_vtotal & 0x7ff) + 1; vblank_start = (vblank & 0xfff) + 1; vblank_end = ((vblank >> 16) & 0xfff) + 1; /* Set the border color to purple. */ I915_WRITE(bclrpat_reg, 0x500050); if (!IS_GEN2(dev)) { uint32_t pipeconf = I915_READ(pipeconf_reg); I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); POSTING_READ(pipeconf_reg); /* Wait for next Vblank to substitue * border color for Color info */ intel_wait_for_vblank(dev, pipe); st00 = I915_READ8(VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ? connector_status_connected : connector_status_disconnected; I915_WRITE(pipeconf_reg, pipeconf); } else { bool restore_vblank = false; int count, detect; /* * If there isn't any border, add some. * Yes, this will flicker */ if (vblank_start <= vactive && vblank_end >= vtotal) { uint32_t vsync = I915_READ(vsync_reg); uint32_t vsync_start = (vsync & 0xffff) + 1; vblank_start = vsync_start; I915_WRITE(vblank_reg, (vblank_start - 1) | ((vblank_end - 1) << 16)); restore_vblank = true; } /* sample in the vertical border, selecting the larger one */ if (vblank_start - vactive >= vtotal - vblank_end) vsample = (vblank_start + vactive) >> 1; else vsample = (vtotal + vblank_end) >> 1; /* * Wait for the border to be displayed */ while (I915_READ(pipe_dsl_reg) >= vactive) ; while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample) ; /* * Watch ST00 for an entire scanline */ detect = 0; count = 0; do { count++; /* Read the ST00 VGA status register */ st00 = I915_READ8(VGA_MSR_WRITE); if (st00 & (1 << 4)) detect++; } while ((I915_READ(pipe_dsl_reg) == dsl)); /* restore vblank if necessary */ if (restore_vblank) I915_WRITE(vblank_reg, vblank); /* * If more than 3/4 of the scanline detected a monitor, * then it is assumed to be present. This works even on i830, * where there isn't any way to force the border color across * the screen */ status = detect * 4 > count * 3 ? 
connector_status_connected : connector_status_disconnected; } /* Restore previous settings */ I915_WRITE(bclrpat_reg, save_bclrpat); return status; } static enum drm_connector_status intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); enum drm_connector_status status; struct intel_load_detect_pipe tmp; if (I915_HAS_HOTPLUG(dev)) { if (intel_crt_detect_hotplug(connector)) { DRM_DEBUG_KMS("CRT detected via hotplug\n"); return connector_status_connected; } else { DRM_DEBUG_KMS("CRT not detected via hotplug\n"); return connector_status_disconnected; } } if (intel_crt_detect_ddc(connector)) return connector_status_connected; if (!force) return connector->status; /* for pre-945g platforms use load detect */ if (intel_get_load_detect_pipe(&crt->base, connector, NULL, &tmp)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else status = intel_crt_load_detect(crt); intel_release_load_detect_pipe(&crt->base, connector, &tmp); } else status = connector_status_unknown; return status; } static void intel_crt_destroy(struct drm_connector *connector) { #if 0 drm_sysfs_connector_remove(connector); #endif drm_connector_cleanup(connector); free(connector, DRM_MEM_KMS); } static int intel_crt_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; + device_t iic; - ret = intel_ddc_get_modes(connector, - dev_priv->gmbus[dev_priv->crt_ddc_pin]); + iic = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); + ret = intel_ddc_get_modes(connector, iic); if (ret || !IS_G4X(dev)) return ret; /* Try to probe digital port for output in DVI-I -> VGA mode. 
*/ - return (intel_ddc_get_modes(connector, - dev_priv->gmbus[GMBUS_PORT_DPB])); + iic = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); + return intel_ddc_get_modes(connector, iic); } static int intel_crt_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { return 0; } static void intel_crt_reset(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); if (HAS_PCH_SPLIT(dev)) crt->force_hotplug_required = 1; } /* * Routines for controlling stuff on the analog port */ -static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { - .dpms = intel_crt_dpms, +static const struct drm_encoder_helper_funcs pch_encoder_funcs = { .mode_fixup = intel_crt_mode_fixup, .prepare = intel_encoder_prepare, .commit = intel_encoder_commit, .mode_set = intel_crt_mode_set, + .dpms = pch_crt_dpms, }; +static const struct drm_encoder_helper_funcs gmch_encoder_funcs = { + .mode_fixup = intel_crt_mode_fixup, + .prepare = intel_encoder_prepare, + .commit = intel_encoder_commit, + .mode_set = intel_crt_mode_set, + .dpms = gmch_crt_dpms, +}; + static const struct drm_connector_funcs intel_crt_connector_funcs = { .reset = intel_crt_reset, .dpms = drm_helper_connector_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = intel_crt_destroy, .set_property = intel_crt_set_property, }; static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { .mode_valid = intel_crt_mode_valid, .get_modes = intel_crt_get_modes, .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_crt_enc_funcs = { .destroy = intel_encoder_destroy, }; static int intel_no_crt_dmi_callback(const struct dmi_system_id *id) { - DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident); + DRM_INFO("Skipping CRT initialization for %s\n", id->ident); return 1; } static const struct dmi_system_id intel_no_crt[] = { { .callback = intel_no_crt_dmi_callback, .ident = "ACER ZGB", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ACER"), DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), }, }, { } }; void intel_crt_init(struct drm_device *dev) { struct drm_connector *connector; struct intel_crt *crt; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_encoder_helper_funcs *encoder_helper_funcs; /* Skip machines without VGA that falsely report hotplug events */ if (dmi_check_system(intel_no_crt)) return; crt = malloc(sizeof(struct intel_crt), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); connector = &intel_connector->base; drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); intel_connector_attach_encoder(intel_connector, &crt->base); crt->base.type = INTEL_OUTPUT_ANALOG; crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | 1 << INTEL_ANALOG_CLONE_BIT | 1 << INTEL_SDVO_LVDS_CLONE_BIT); - crt->base.crtc_mask = (1 << 0) | (1 << 1); + if (IS_HASWELL(dev)) + crt->base.crtc_mask = (1 << 0); + else + crt->base.crtc_mask = (1 << 0) | (1 << 1); + if (IS_GEN2(dev)) connector->interlace_allowed = 0; else connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); + if (HAS_PCH_SPLIT(dev)) + encoder_helper_funcs = 
&pch_encoder_funcs; + else + encoder_helper_funcs = &gmch_encoder_funcs; + + drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); #if 0 drm_sysfs_connector_add(connector); #endif if (I915_HAS_HOTPLUG(dev)) connector->polled = DRM_CONNECTOR_POLL_HPD; else connector->polled = DRM_CONNECTOR_POLL_CONNECT; /* * Configure the automatic hotplug detection stuff */ crt->force_hotplug_required = 0; if (HAS_PCH_SPLIT(dev)) { u32 adpa; adpa = I915_READ(PCH_ADPA); adpa &= ~ADPA_CRT_HOTPLUG_MASK; adpa |= ADPA_HOTPLUG_BITS; I915_WRITE(PCH_ADPA, adpa); POSTING_READ(PCH_ADPA); DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); crt->force_hotplug_required = 1; } dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; } Index: head/sys/dev/drm2/i915/intel_ddi.c =================================================================== --- head/sys/dev/drm2/i915/intel_ddi.c (nonexistent) +++ head/sys/dev/drm2/i915/intel_ddi.c (revision 277487) @@ -0,0 +1,761 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eugeni Dodonov + * + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include + +/* HDMI/DVI modes ignore everything but the last 2 items. So we share + * them for both DP and FDI transports, allowing those ports to + * automatically adapt to HDMI connections as well + */ +static const u32 hsw_ddi_translations_dp[] = { + 0x00FFFFFF, 0x0006000E, /* DP parameters */ + 0x00D75FFF, 0x0005000A, + 0x00C30FFF, 0x00040006, + 0x80AAAFFF, 0x000B0000, + 0x00FFFFFF, 0x0005000A, + 0x00D75FFF, 0x000C0004, + 0x80C30FFF, 0x000B0000, + 0x00FFFFFF, 0x00040006, + 0x80D75FFF, 0x000B0000, + 0x00FFFFFF, 0x00040006 /* HDMI parameters */ +}; + +static const u32 hsw_ddi_translations_fdi[] = { + 0x00FFFFFF, 0x0007000E, /* FDI parameters */ + 0x00D75FFF, 0x000F000A, + 0x00C30FFF, 0x00060006, + 0x00AAAFFF, 0x001E0000, + 0x00FFFFFF, 0x000F000A, + 0x00D75FFF, 0x00160004, + 0x00C30FFF, 0x001E0000, + 0x00FFFFFF, 0x00060006, + 0x00D75FFF, 0x001E0000, + 0x00FFFFFF, 0x00040006 /* HDMI parameters */ +}; + +/* On Haswell, DDI port buffers must be programmed with correct values + * in advance. The buffer values are different for FDI and DP modes, + * but the HDMI/DVI fields are shared among those. 
So we program the DDI + * in either FDI or DP modes only, as HDMI connections will work with both + * of those + */ +static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + int i; + const u32 *ddi_translations = ((use_fdi_mode) ? + hsw_ddi_translations_fdi : + hsw_ddi_translations_dp); + + DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n", + port_name(port), + use_fdi_mode ? "FDI" : "DP"); + + if (use_fdi_mode && (port != PORT_E)) + DRM_DEBUG_KMS("Programming port %c in FDI mode, this probably will not work.\n", + port_name(port)); + + for (i=0, reg=DDI_BUF_TRANS(port); i < DRM_ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { + I915_WRITE(reg, ddi_translations[i]); + reg += 4; + } +} + +/* Program DDI buffers translations for DP. By default, program ports A-D in DP + * mode and port E for FDI. + */ +void intel_prepare_ddi(struct drm_device *dev) +{ + int port; + + if (IS_HASWELL(dev)) { + for (port = PORT_A; port < PORT_E; port++) + intel_prepare_ddi_buffers(dev, port, false); + + /* DDI E is the suggested one to work in FDI mode, so program is as such by + * default. It will have to be re-programmed in case a digital DP output + * will be detected on it + */ + intel_prepare_ddi_buffers(dev, PORT_E, true); + } +} + +static const long hsw_ddi_buf_ctl_values[] = { + DDI_BUF_EMP_400MV_0DB_HSW, + DDI_BUF_EMP_400MV_3_5DB_HSW, + DDI_BUF_EMP_400MV_6DB_HSW, + DDI_BUF_EMP_400MV_9_5DB_HSW, + DDI_BUF_EMP_600MV_0DB_HSW, + DDI_BUF_EMP_600MV_3_5DB_HSW, + DDI_BUF_EMP_600MV_6DB_HSW, + DDI_BUF_EMP_800MV_0DB_HSW, + DDI_BUF_EMP_800MV_3_5DB_HSW +}; + + +/* Starting with Haswell, different DDI ports can work in FDI mode for + * connection to the PCH-located connectors. For this, it is necessary to train + * both the DDI port and PCH receiver for the desired DDI buffer settings. + * + * The recommended port to work in FDI mode is DDI E, which we use here. 
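+ * The training loop in hsw_fdi_link_train() below steps through the
+ * voltage/emphasis values in hsw_ddi_buf_ctl_values until DP_TP_STATUS
+ * reports that auto-training completed.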
Also, + * please note that when FDI mode is active on DDI E, it shares 2 lines with + * DDI A (which is used for eDP) + */ + +void hsw_fdi_link_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp, i; + + /* Configure CPU PLL, wait for warmup */ + I915_WRITE(SPLL_CTL, + SPLL_PLL_ENABLE | + SPLL_PLL_FREQ_1350MHz | + SPLL_PLL_SCC); + + /* Use SPLL to drive the output when in FDI mode */ + I915_WRITE(PORT_CLK_SEL(PORT_E), + PORT_CLK_SEL_SPLL); + I915_WRITE(PIPE_CLK_SEL(pipe), + PIPE_CLK_SEL_PORT(PORT_E)); + + DELAY(20); + + /* Start the training iterating through available voltages and emphasis */ + for (i=0; i < DRM_ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) { + /* Configure DP_TP_CTL with auto-training */ + I915_WRITE(DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_LINK_TRAIN_PAT1 | + DP_TP_CTL_ENABLE); + + /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ + temp = I915_READ(DDI_BUF_CTL(PORT_E)); + temp = (temp & ~DDI_BUF_EMP_MASK); + I915_WRITE(DDI_BUF_CTL(PORT_E), + temp | + DDI_BUF_CTL_ENABLE | + DDI_PORT_WIDTH_X2 | + hsw_ddi_buf_ctl_values[i]); + + DELAY(600); + + /* Enable CPU FDI Receiver with auto-training */ + reg = FDI_RX_CTL(pipe); + I915_WRITE(reg, + I915_READ(reg) | + FDI_LINK_TRAIN_AUTO | + FDI_RX_ENABLE | + FDI_LINK_TRAIN_PATTERN_1_CPT | + FDI_RX_ENHANCE_FRAME_ENABLE | + FDI_PORT_WIDTH_2X_LPT | + FDI_RX_PLL_ENABLE); + POSTING_READ(reg); + DELAY(100); + + temp = I915_READ(DP_TP_STATUS(PORT_E)); + if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { + DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i); + + /* Enable normal pixel sending for FDI */ + I915_WRITE(DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_LINK_TRAIN_NORMAL | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_ENABLE); + + /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */ + temp = I915_READ(DDI_FUNC_CTL(pipe)); + temp &= ~PIPE_DDI_PORT_MASK; + temp |= PIPE_DDI_SELECT_PORT(PORT_E) | + PIPE_DDI_MODE_SELECT_FDI | + PIPE_DDI_FUNC_ENABLE | + PIPE_DDI_PORT_WIDTH_X2; + I915_WRITE(DDI_FUNC_CTL(pipe), + temp); + break; + } else { + DRM_ERROR("Error training BUF_CTL %d\n", i); + + /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */ + I915_WRITE(DP_TP_CTL(PORT_E), + I915_READ(DP_TP_CTL(PORT_E)) & + ~DP_TP_CTL_ENABLE); + I915_WRITE(FDI_RX_CTL(pipe), + I915_READ(FDI_RX_CTL(pipe)) & + ~FDI_RX_PLL_ENABLE); + continue; + } + } + + DRM_DEBUG_KMS("FDI train done.\n"); +} + +/* For DDI connections, it is possible to support different outputs over the + * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by + * the time the output is detected what exactly is on the other end of it. This + * function aims at providing support for this detection and proper output + * configuration. 
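+ *
+ * For now only HDMI is wired up: the switch below registers an HDMI
+ * encoder for ports B, C and D and skips DP/eDP handling on port A.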
+ */ +void intel_ddi_init(struct drm_device *dev, enum port port) +{ + /* For now, we don't do any proper output detection and assume that we + * handle HDMI only */ + + switch(port){ + case PORT_A: + /* We don't handle eDP and DP yet */ + DRM_DEBUG_DRIVER("Found digital output on DDI port A\n"); + break; + /* Assume that the ports B, C and D are working in HDMI mode for now */ + case PORT_B: + case PORT_C: + case PORT_D: + intel_hdmi_init(dev, DDI_BUF_CTL(port)); + break; + default: + DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", + port); + break; + } +} + +/* WRPLL clock dividers */ +struct wrpll_tmds_clock { + u32 clock; + u16 p; /* Post divider */ + u16 n2; /* Feedback divider */ + u16 r2; /* Reference divider */ +}; + +/* Table of matching values for WRPLL clocks programming for each frequency */ +static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { + {19750, 38, 25, 18}, + {20000, 48, 32, 18}, + {21000, 36, 21, 15}, + {21912, 42, 29, 17}, + {22000, 36, 22, 15}, + {23000, 36, 23, 15}, + {23500, 40, 40, 23}, + {23750, 26, 16, 14}, + {23750, 26, 16, 14}, + {24000, 36, 24, 15}, + {25000, 36, 25, 15}, + {25175, 26, 40, 33}, + {25200, 30, 21, 15}, + {26000, 36, 26, 15}, + {27000, 30, 21, 14}, + {27027, 18, 100, 111}, + {27500, 30, 29, 19}, + {28000, 34, 30, 17}, + {28320, 26, 30, 22}, + {28322, 32, 42, 25}, + {28750, 24, 23, 18}, + {29000, 30, 29, 18}, + {29750, 32, 30, 17}, + {30000, 30, 25, 15}, + {30750, 30, 41, 24}, + {31000, 30, 31, 18}, + {31500, 30, 28, 16}, + {32000, 30, 32, 18}, + {32500, 28, 32, 19}, + {33000, 24, 22, 15}, + {34000, 28, 30, 17}, + {35000, 26, 32, 19}, + {35500, 24, 30, 19}, + {36000, 26, 26, 15}, + {36750, 26, 46, 26}, + {37000, 24, 23, 14}, + {37762, 22, 40, 26}, + {37800, 20, 21, 15}, + {38000, 24, 27, 16}, + {38250, 24, 34, 20}, + {39000, 24, 26, 15}, + {40000, 24, 32, 18}, + {40500, 20, 21, 14}, + {40541, 22, 147, 89}, + {40750, 18, 19, 14}, + {41000, 16, 17, 14}, + {41500, 22, 44, 26}, + {41540, 22, 44, 26}, + {42000, 18, 21, 15}, + {42500, 22, 45, 26}, + {43000, 20, 43, 27}, + {43163, 20, 24, 15}, + {44000, 18, 22, 15}, + {44900, 20, 108, 65}, + {45000, 20, 25, 15}, + {45250, 20, 52, 31}, + {46000, 18, 23, 15}, + {46750, 20, 45, 26}, + {47000, 20, 40, 23}, + {48000, 18, 24, 15}, + {49000, 18, 49, 30}, + {49500, 16, 22, 15}, + {50000, 18, 25, 15}, + {50500, 18, 32, 19}, + {51000, 18, 34, 20}, + {52000, 18, 26, 15}, + {52406, 14, 34, 25}, + {53000, 16, 22, 14}, + {54000, 16, 24, 15}, + {54054, 16, 173, 108}, + {54500, 14, 24, 17}, + {55000, 12, 22, 18}, + {56000, 14, 45, 31}, + {56250, 16, 25, 15}, + {56750, 14, 25, 17}, + {57000, 16, 27, 16}, + {58000, 16, 43, 25}, + {58250, 16, 38, 22}, + {58750, 16, 40, 23}, + {59000, 14, 26, 17}, + {59341, 14, 40, 26}, + {59400, 16, 44, 25}, + {60000, 16, 32, 18}, + {60500, 12, 39, 29}, + {61000, 14, 49, 31}, + {62000, 14, 37, 23}, + {62250, 14, 42, 26}, + {63000, 12, 21, 15}, + {63500, 14, 28, 17}, + {64000, 12, 27, 19}, + {65000, 14, 32, 19}, + {65250, 12, 29, 20}, + {65500, 12, 32, 22}, + {66000, 12, 22, 15}, + {66667, 14, 38, 22}, + {66750, 10, 21, 17}, + {67000, 14, 33, 19}, + {67750, 14, 58, 33}, + {68000, 14, 30, 17}, + {68179, 14, 46, 26}, + {68250, 14, 46, 26}, + {69000, 12, 23, 15}, + {70000, 12, 28, 18}, + {71000, 12, 30, 19}, + {72000, 12, 24, 15}, + {73000, 10, 23, 17}, + {74000, 12, 23, 14}, + {74176, 8, 100, 91}, + {74250, 10, 22, 16}, + {74481, 12, 43, 26}, + {74500, 10, 29, 21}, + {75000, 12, 25, 15}, + {75250, 10, 39, 28}, + {76000, 12, 27, 16}, + {77000, 
12, 53, 31}, + {78000, 12, 26, 15}, + {78750, 12, 28, 16}, + {79000, 10, 38, 26}, + {79500, 10, 28, 19}, + {80000, 12, 32, 18}, + {81000, 10, 21, 14}, + {81081, 6, 100, 111}, + {81624, 8, 29, 24}, + {82000, 8, 17, 14}, + {83000, 10, 40, 26}, + {83950, 10, 28, 18}, + {84000, 10, 28, 18}, + {84750, 6, 16, 17}, + {85000, 6, 17, 18}, + {85250, 10, 30, 19}, + {85750, 10, 27, 17}, + {86000, 10, 43, 27}, + {87000, 10, 29, 18}, + {88000, 10, 44, 27}, + {88500, 10, 41, 25}, + {89000, 10, 28, 17}, + {89012, 6, 90, 91}, + {89100, 10, 33, 20}, + {90000, 10, 25, 15}, + {91000, 10, 32, 19}, + {92000, 10, 46, 27}, + {93000, 10, 31, 18}, + {94000, 10, 40, 23}, + {94500, 10, 28, 16}, + {95000, 10, 44, 25}, + {95654, 10, 39, 22}, + {95750, 10, 39, 22}, + {96000, 10, 32, 18}, + {97000, 8, 23, 16}, + {97750, 8, 42, 29}, + {98000, 8, 45, 31}, + {99000, 8, 22, 15}, + {99750, 8, 34, 23}, + {100000, 6, 20, 18}, + {100500, 6, 19, 17}, + {101000, 6, 37, 33}, + {101250, 8, 21, 14}, + {102000, 6, 17, 15}, + {102250, 6, 25, 22}, + {103000, 8, 29, 19}, + {104000, 8, 37, 24}, + {105000, 8, 28, 18}, + {106000, 8, 22, 14}, + {107000, 8, 46, 29}, + {107214, 8, 27, 17}, + {108000, 8, 24, 15}, + {108108, 8, 173, 108}, + {109000, 6, 23, 19}, + {109000, 6, 23, 19}, + {110000, 6, 22, 18}, + {110013, 6, 22, 18}, + {110250, 8, 49, 30}, + {110500, 8, 36, 22}, + {111000, 8, 23, 14}, + {111264, 8, 150, 91}, + {111375, 8, 33, 20}, + {112000, 8, 63, 38}, + {112500, 8, 25, 15}, + {113100, 8, 57, 34}, + {113309, 8, 42, 25}, + {114000, 8, 27, 16}, + {115000, 6, 23, 18}, + {116000, 8, 43, 25}, + {117000, 8, 26, 15}, + {117500, 8, 40, 23}, + {118000, 6, 38, 29}, + {119000, 8, 30, 17}, + {119500, 8, 46, 26}, + {119651, 8, 39, 22}, + {120000, 8, 32, 18}, + {121000, 6, 39, 29}, + {121250, 6, 31, 23}, + {121750, 6, 23, 17}, + {122000, 6, 42, 31}, + {122614, 6, 30, 22}, + {123000, 6, 41, 30}, + {123379, 6, 37, 27}, + {124000, 6, 51, 37}, + {125000, 6, 25, 18}, + {125250, 4, 13, 14}, + {125750, 4, 27, 29}, + {126000, 6, 21, 15}, + {127000, 6, 24, 17}, + {127250, 6, 41, 29}, + {128000, 6, 27, 19}, + {129000, 6, 43, 30}, + {129859, 4, 25, 26}, + {130000, 6, 26, 18}, + {130250, 6, 42, 29}, + {131000, 6, 32, 22}, + {131500, 6, 38, 26}, + {131850, 6, 41, 28}, + {132000, 6, 22, 15}, + {132750, 6, 28, 19}, + {133000, 6, 34, 23}, + {133330, 6, 37, 25}, + {134000, 6, 61, 41}, + {135000, 6, 21, 14}, + {135250, 6, 167, 111}, + {136000, 6, 62, 41}, + {137000, 6, 35, 23}, + {138000, 6, 23, 15}, + {138500, 6, 40, 26}, + {138750, 6, 37, 24}, + {139000, 6, 34, 22}, + {139050, 6, 34, 22}, + {139054, 6, 34, 22}, + {140000, 6, 28, 18}, + {141000, 6, 36, 23}, + {141500, 6, 22, 14}, + {142000, 6, 30, 19}, + {143000, 6, 27, 17}, + {143472, 4, 17, 16}, + {144000, 6, 24, 15}, + {145000, 6, 29, 18}, + {146000, 6, 47, 29}, + {146250, 6, 26, 16}, + {147000, 6, 49, 30}, + {147891, 6, 23, 14}, + {148000, 6, 23, 14}, + {148250, 6, 28, 17}, + {148352, 4, 100, 91}, + {148500, 6, 33, 20}, + {149000, 6, 48, 29}, + {150000, 6, 25, 15}, + {151000, 4, 19, 17}, + {152000, 6, 27, 16}, + {152280, 6, 44, 26}, + {153000, 6, 34, 20}, + {154000, 6, 53, 31}, + {155000, 6, 31, 18}, + {155250, 6, 50, 29}, + {155750, 6, 45, 26}, + {156000, 6, 26, 15}, + {157000, 6, 61, 35}, + {157500, 6, 28, 16}, + {158000, 6, 65, 37}, + {158250, 6, 44, 25}, + {159000, 6, 53, 30}, + {159500, 6, 39, 22}, + {160000, 6, 32, 18}, + {161000, 4, 31, 26}, + {162000, 4, 18, 15}, + {162162, 4, 131, 109}, + {162500, 4, 53, 44}, + {163000, 4, 29, 24}, + {164000, 4, 17, 14}, + {165000, 4, 22, 18}, + {166000, 4, 32, 26}, 
+ {167000, 4, 26, 21}, + {168000, 4, 46, 37}, + {169000, 4, 104, 83}, + {169128, 4, 64, 51}, + {169500, 4, 39, 31}, + {170000, 4, 34, 27}, + {171000, 4, 19, 15}, + {172000, 4, 51, 40}, + {172750, 4, 32, 25}, + {172800, 4, 32, 25}, + {173000, 4, 41, 32}, + {174000, 4, 49, 38}, + {174787, 4, 22, 17}, + {175000, 4, 35, 27}, + {176000, 4, 30, 23}, + {177000, 4, 38, 29}, + {178000, 4, 29, 22}, + {178500, 4, 37, 28}, + {179000, 4, 53, 40}, + {179500, 4, 73, 55}, + {180000, 4, 20, 15}, + {181000, 4, 55, 41}, + {182000, 4, 31, 23}, + {183000, 4, 42, 31}, + {184000, 4, 30, 22}, + {184750, 4, 26, 19}, + {185000, 4, 37, 27}, + {186000, 4, 51, 37}, + {187000, 4, 36, 26}, + {188000, 4, 32, 23}, + {189000, 4, 21, 15}, + {190000, 4, 38, 27}, + {190960, 4, 41, 29}, + {191000, 4, 41, 29}, + {192000, 4, 27, 19}, + {192250, 4, 37, 26}, + {193000, 4, 20, 14}, + {193250, 4, 53, 37}, + {194000, 4, 23, 16}, + {194208, 4, 23, 16}, + {195000, 4, 26, 18}, + {196000, 4, 45, 31}, + {197000, 4, 35, 24}, + {197750, 4, 41, 28}, + {198000, 4, 22, 15}, + {198500, 4, 25, 17}, + {199000, 4, 28, 19}, + {200000, 4, 37, 25}, + {201000, 4, 61, 41}, + {202000, 4, 112, 75}, + {202500, 4, 21, 14}, + {203000, 4, 146, 97}, + {204000, 4, 62, 41}, + {204750, 4, 44, 29}, + {205000, 4, 38, 25}, + {206000, 4, 29, 19}, + {207000, 4, 23, 15}, + {207500, 4, 40, 26}, + {208000, 4, 37, 24}, + {208900, 4, 48, 31}, + {209000, 4, 48, 31}, + {209250, 4, 31, 20}, + {210000, 4, 28, 18}, + {211000, 4, 25, 16}, + {212000, 4, 22, 14}, + {213000, 4, 30, 19}, + {213750, 4, 38, 24}, + {214000, 4, 46, 29}, + {214750, 4, 35, 22}, + {215000, 4, 43, 27}, + {216000, 4, 24, 15}, + {217000, 4, 37, 23}, + {218000, 4, 42, 26}, + {218250, 4, 42, 26}, + {218750, 4, 34, 21}, + {219000, 4, 47, 29}, + {219000, 4, 47, 29}, + {220000, 4, 44, 27}, + {220640, 4, 49, 30}, + {220750, 4, 36, 22}, + {221000, 4, 36, 22}, + {222000, 4, 23, 14}, + {222525, 4, 28, 17}, + {222750, 4, 33, 20}, + {227000, 4, 37, 22}, + {230250, 4, 29, 17}, + {233500, 4, 38, 22}, + {235000, 4, 40, 23}, + {238000, 4, 30, 17}, + {241500, 2, 17, 19}, + {245250, 2, 20, 22}, + {247750, 2, 22, 24}, + {253250, 2, 15, 16}, + {256250, 2, 18, 19}, + {262500, 2, 31, 32}, + {267250, 2, 66, 67}, + {268500, 2, 94, 95}, + {270000, 2, 14, 14}, + {272500, 2, 77, 76}, + {273750, 2, 57, 56}, + {280750, 2, 24, 23}, + {281250, 2, 23, 22}, + {286000, 2, 17, 16}, + {291750, 2, 26, 24}, + {296703, 2, 56, 51}, + {297000, 2, 22, 20}, + {298000, 2, 21, 19}, +}; + +void intel_ddi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + int port = intel_hdmi->ddi_port; + int pipe = intel_crtc->pipe; + int p, n2, r2, valid=0; + u32 temp, i; + + /* On Haswell, we need to enable the clocks and prepare DDI function to + * work in HDMI mode for this pipe. 
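+ * The WRPLL dividers for the requested pixel clock are taken from
+ * wrpll_tmds_clock_table; if no entry matches crtc->mode.clock exactly,
+ * the mode set is aborted with an error.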
+ */ + DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); + + for (i=0; i < DRM_ARRAY_SIZE(wrpll_tmds_clock_table); i++) { + if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) { + p = wrpll_tmds_clock_table[i].p; + n2 = wrpll_tmds_clock_table[i].n2; + r2 = wrpll_tmds_clock_table[i].r2; + + DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n", + crtc->mode.clock, + p, n2, r2); + + valid = 1; + break; + } + } + + if (!valid) { + DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n", + crtc->mode.clock); + return; + } + + /* Enable LCPLL if disabled */ + temp = I915_READ(LCPLL_CTL); + if (temp & LCPLL_PLL_DISABLE) + I915_WRITE(LCPLL_CTL, + temp & ~LCPLL_PLL_DISABLE); + + /* Configure WR PLL 1, program the correct divider values for + * the desired frequency and wait for warmup */ + I915_WRITE(WRPLL_CTL1, + WRPLL_PLL_ENABLE | + WRPLL_PLL_SELECT_LCPLL_2700 | + WRPLL_DIVIDER_REFERENCE(r2) | + WRPLL_DIVIDER_FEEDBACK(n2) | + WRPLL_DIVIDER_POST(p)); + + DELAY(20); + + /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use + * this port for connection. + */ + I915_WRITE(PORT_CLK_SEL(port), + PORT_CLK_SEL_WRPLL1); + I915_WRITE(PIPE_CLK_SEL(pipe), + PIPE_CLK_SEL_PORT(port)); + + DELAY(20); + + if (intel_hdmi->has_audio) { + /* Proper support for digital audio needs a new logic and a new set + * of registers, so we leave it for future patch bombing. + */ + DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n", + pipe_name(intel_crtc->pipe)); + } + + /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ + temp = I915_READ(DDI_FUNC_CTL(pipe)); + temp &= ~PIPE_DDI_PORT_MASK; + temp &= ~PIPE_DDI_BPC_12; + temp |= PIPE_DDI_SELECT_PORT(port) | + PIPE_DDI_MODE_SELECT_HDMI | + ((intel_crtc->bpp > 24) ? + PIPE_DDI_BPC_12 : + PIPE_DDI_BPC_8) | + PIPE_DDI_FUNC_ENABLE; + + I915_WRITE(DDI_FUNC_CTL(pipe), temp); + + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +void intel_ddi_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + int port = intel_hdmi->ddi_port; + u32 temp; + + temp = I915_READ(DDI_BUF_CTL(port)); + + if (mode != DRM_MODE_DPMS_ON) { + temp &= ~DDI_BUF_CTL_ENABLE; + } else { + temp |= DDI_BUF_CTL_ENABLE; + } + + /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, + * and swing/emphasis values are ignored so nothing special needs + * to be done besides enabling the port. 
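+ * Any mode other than DPMS_ON simply clears DDI_BUF_CTL_ENABLE again.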
+ */ + I915_WRITE(DDI_BUF_CTL(port), + temp); +} Property changes on: head/sys/dev/drm2/i915/intel_ddi.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/drm2/i915/intel_display.c =================================================================== --- head/sys/dev/drm2/i915/intel_display.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_display.c (revision 277487) @@ -1,9532 +1,7281 @@ /* * Copyright © 2006-2007 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include -#include #include #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) bool intel_pipe_has_type(struct drm_crtc *crtc, int type); -static void intel_update_watermarks(struct drm_device *dev); static void intel_increase_pllclock(struct drm_crtc *crtc); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); typedef struct { /* given values */ int n; int m1, m2; int p1, p2; /* derived values */ int dot; int vco; int m; int p; } intel_clock_t; typedef struct { int min, max; } intel_range_t; typedef struct { int dot_limit; int p2_slow, p2_fast; } intel_p2_t; #define INTEL_P2_NUM 2 typedef struct intel_limit intel_limit_t; struct intel_limit { intel_range_t dot, vco, n, m, m1, m2, p, p1; intel_p2_t p2; bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, int, int, intel_clock_t *, intel_clock_t *); }; /* FDI */ #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock); static bool intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock); static bool intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock); static bool intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock); static inline u32 /* units of 100MHz */ intel_fdi_link_freq(struct drm_device *dev) { if (IS_GEN5(dev)) { struct drm_i915_private *dev_priv = dev->dev_private; return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; } else return 27; } static const intel_limit_t intel_limits_i8xx_dvo = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 930000, .max = 1400000 }, .n = { .min = 3, .max = 16 }, .m = { .min = 96, .max = 140 }, .m1 = { .min = 18, .max = 26 }, .m2 = { .min = 6, .max = 16 }, .p = { .min = 4, .max = 128 }, .p1 = { .min = 2, .max = 33 }, .p2 = { .dot_limit = 165000, .p2_slow = 4, .p2_fast = 2 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i8xx_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 930000, .max = 1400000 }, .n = { .min = 3, .max = 16 }, .m = { .min = 96, .max = 140 }, .m1 = { .min = 18, .max = 26 }, .m2 = { .min = 6, .max = 16 }, .p = { .min = 4, .max = 128 }, .p1 = { .min = 1, .max = 6 }, .p2 = { .dot_limit = 165000, .p2_slow = 14, .p2_fast = 7 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i9xx_sdvo = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, .m = { .min = 70, .max = 120 }, .m1 = { .min = 10, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i9xx_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, .m = { .min = 70, .max = 120 }, .m1 = { .min = 10, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 7, .max = 98 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 112000, .p2_slow 
= 14, .p2_fast = 7 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_sdvo = { .dot = { .min = 25000, .max = 270000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 10, .max = 30 }, .p1 = { .min = 1, .max = 3}, .p2 = { .dot_limit = 270000, .p2_slow = 10, .p2_fast = 10 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_hdmi = { .dot = { .min = 22000, .max = 400000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 16, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8}, .p2 = { .dot_limit = 165000, .p2_slow = 10, .p2_fast = 5 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_single_channel_lvds = { .dot = { .min = 20000, .max = 115000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 0, .p2_slow = 14, .p2_fast = 14 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { .dot = { .min = 80000, .max = 224000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 14, .max = 42 }, .p1 = { .min = 2, .max = 6 }, .p2 = { .dot_limit = 0, .p2_slow = 7, .p2_fast = 7 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_display_port = { .dot = { .min = 161670, .max = 227000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 2 }, .m = { .min = 97, .max = 108 }, .m1 = { .min = 0x10, .max = 0x12 }, .m2 = { .min = 0x05, .max = 0x06 }, .p = { .min = 10, .max = 20 }, .p1 = { .min = 1, .max = 2}, .p2 = { .dot_limit = 0, .p2_slow = 10, .p2_fast = 10 }, .find_pll = intel_find_pll_g4x_dp, }; static const intel_limit_t intel_limits_pineview_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* Pineview's Ncounter is a ring counter */ .n = { .min = 3, .max = 6 }, .m = { .min = 2, .max = 256 }, /* Pineview only has one combined m divider, which we treat as m2. */ .m1 = { .min = 0, .max = 0 }, .m2 = { .min = 0, .max = 254 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_pineview_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, .m = { .min = 2, .max = 256 }, .m1 = { .min = 0, .max = 0 }, .m2 = { .min = 0, .max = 254 }, .p = { .min = 7, .max = 112 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 112000, .p2_slow = 14, .p2_fast = 14 }, .find_pll = intel_find_best_PLL, }; /* Ironlake / Sandybridge * * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). 
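* In other words, intel_clock() below computes
* dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / ((n + 2) * p1 * p2).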
*/ static const intel_limit_t intel_limits_ironlake_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, .m = { .min = 79, .max = 127 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 10, .p2_fast = 5 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 79, .max = 118 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 14, .p2_fast = 14 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 79, .max = 127 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 14, .max = 56 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 7, .p2_fast = 7 }, .find_pll = intel_g4x_find_best_PLL, }; /* LVDS 100mhz refclk limits. */ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, .m = { .min = 79, .max = 126 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 14, .p2_fast = 14 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 79, .max = 126 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 14, .max = 42 }, .p1 = { .min = 2, .max = 6 }, .p2 = { .dot_limit = 225000, .p2_slow = 7, .p2_fast = 7 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_display_port = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000}, .n = { .min = 1, .max = 2 }, .m = { .min = 81, .max = 90 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 10, .max = 20 }, .p1 = { .min = 1, .max = 2}, .p2 = { .dot_limit = 0, .p2_slow = 10, .p2_fast = 10 }, .find_pll = intel_find_pll_ironlake_dp, }; +u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) +{ + u32 val = 0; + + mtx_lock(&dev_priv->dpio_lock); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO idle wait timed out\n"); + goto out_unlock; + } + + I915_WRITE(DPIO_REG, reg); + I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | + DPIO_BYTE); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO read wait timed out\n"); + goto out_unlock; + } + val = I915_READ(DPIO_DATA); + +out_unlock: + mtx_unlock(&dev_priv->dpio_lock); + return val; +} + +#if 0 +static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, + u32 val) +{ + + mtx_lock(&dev_priv->dpio_lock); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO idle wait timed out\n"); + goto out_unlock; + } + + I915_WRITE(DPIO_DATA, val); + I915_WRITE(DPIO_REG, 
reg); + I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | + DPIO_BYTE); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) + DRM_ERROR("DPIO write wait timed out\n"); + +out_unlock: + mtx_unlock(&dev_priv->dpio_lock); +} +#endif + +static void vlv_init_dpio(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Reset the DPIO config */ + I915_WRITE(DPIO_CTL, 0); + POSTING_READ(DPIO_CTL); + I915_WRITE(DPIO_CTL, 1); + POSTING_READ(DPIO_CTL); +} + +static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) +{ + DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); + return 1; +} + +static const struct dmi_system_id intel_dual_link_lvds[] = { + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro (Core i5/i7 Series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), + }, + }, + { } /* terminating entry */ +}; + +static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, + unsigned int reg) +{ + unsigned int val; + + /* use the module option value if specified */ + if (i915_lvds_channel_mode > 0) + return i915_lvds_channel_mode == 2; + + if (dmi_check_system(intel_dual_link_lvds)) + return true; + + if (dev_priv->lvds_val) + val = dev_priv->lvds_val; + else { + /* BIOS should set the proper LVDS register value at boot, but + * in reality, it doesn't set the value when the lid is closed; + * we need to check "the value to be set" in VBT when LVDS + * register is uninitialized. + */ + val = I915_READ(reg); + if (!(val & ~LVDS_DETECTED)) + val = dev_priv->bios_lvds_val; + dev_priv->lvds_val = val; + } + return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; +} + static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, int refclk) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) { + if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { /* LVDS dual channel */ if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; else limit = &intel_limits_ironlake_dual_lvds; } else { if (refclk == 100000) limit = &intel_limits_ironlake_single_lvds_100m; else limit = &intel_limits_ironlake_single_lvds; } } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || HAS_eDP) limit = &intel_limits_ironlake_display_port; else limit = &intel_limits_ironlake_dac; return limit; } static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) + if (is_dual_link_lvds(dev_priv, LVDS)) /* LVDS with dual channel */ limit = &intel_limits_g4x_dual_channel_lvds; else /* LVDS with dual channel */ limit = &intel_limits_g4x_single_channel_lvds; } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { limit = &intel_limits_g4x_hdmi; } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { limit = &intel_limits_g4x_sdvo; } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { limit = &intel_limits_g4x_display_port; } else /* The option is for other outputs */ limit = &intel_limits_i9xx_sdvo; return limit; } static const 
intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) { struct drm_device *dev = crtc->dev; const intel_limit_t *limit; if (HAS_PCH_SPLIT(dev)) limit = intel_ironlake_limit(crtc, refclk); else if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); } else if (IS_PINEVIEW(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_pineview_lvds; else limit = &intel_limits_pineview_sdvo; } else if (!IS_GEN2(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i9xx_lvds; else limit = &intel_limits_i9xx_sdvo; } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i8xx_lvds; else limit = &intel_limits_i8xx_dvo; } return limit; } /* m1 is reserved as 0 in Pineview, n is a ring counter */ static void pineview_clock(int refclk, intel_clock_t *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / clock->n; clock->dot = clock->vco / clock->p; } static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) { if (IS_PINEVIEW(dev)) { pineview_clock(refclk, clock); return; } clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / (clock->n + 2); clock->dot = clock->vco / clock->p; } /** * Returns whether any output on the specified pipe is of the specified type */ bool intel_pipe_has_type(struct drm_crtc *crtc, int type) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; list_for_each_entry(encoder, &mode_config->encoder_list, base.head) if (encoder->base.crtc == crtc && encoder->type == type) return true; return false; } #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) /** * Returns whether the given set of divisors are valid for a given refclk with * the given connectors. */ static bool intel_PLL_is_valid(struct drm_device *dev, const intel_limit_t *limit, const intel_clock_t *clock) { if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid("p1 out of range\n"); if (clock->p < limit->p.min || limit->p.max < clock->p) INTELPllInvalid("p out of range\n"); if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) INTELPllInvalid("m2 out of range\n"); if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid("m1 out of range\n"); if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) INTELPllInvalid("m1 <= m2\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) INTELPllInvalid("m out of range\n"); if (clock->n < limit->n.min || limit->n.max < clock->n) INTELPllInvalid("n out of range\n"); if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) INTELPllInvalid("vco out of range\n"); /* XXX: We may need to be checking "Dot clock" depending on the multiplier, * connector, etc., rather than just a single range. */ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) INTELPllInvalid("dot out of range\n"); return true; } static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; intel_clock_t clock; int err = target; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && (I915_READ(LVDS)) != 0) { /* * For LVDS, if the panel is on, just rely on its current * settings for dual-channel. 
We haven't figured out how to * reliably set up different single/dual channel state, if we * even can. */ - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) + if (is_dual_link_lvds(dev_priv, LVDS)) clock.p2 = limit->p2.p2_fast; else clock.p2 = limit->p2.p2_slow; } else { if (target < limit->p2.dot_limit) clock.p2 = limit->p2.p2_slow; else clock.p2 = limit->p2.p2_fast; } memset(best_clock, 0, sizeof(*best_clock)); for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { /* m1 is always 0 in Pineview */ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) break; for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; intel_clock(dev, refclk, &clock); if (!intel_PLL_is_valid(dev, limit, &clock)) continue; if (match_clock && clock.p != match_clock->p) continue; this_err = abs(clock.dot - target); if (this_err < err) { *best_clock = clock; err = this_err; } } } } } return (err != target); } static bool intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; intel_clock_t clock; int max_n; bool found; /* approximately equals target * 0.00585 */ int err_most = (target >> 8) + (target >> 9); found = false; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { int lvds_reg; if (HAS_PCH_SPLIT(dev)) lvds_reg = PCH_LVDS; else lvds_reg = LVDS; if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) clock.p2 = limit->p2.p2_fast; else clock.p2 = limit->p2.p2_slow; } else { if (target < limit->p2.dot_limit) clock.p2 = limit->p2.p2_slow; else clock.p2 = limit->p2.p2_fast; } memset(best_clock, 0, sizeof(*best_clock)); max_n = limit->n.max; /* based on hardware requirement, prefer smaller n to precision */ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { /* based on hardware requirement, prefere larger m1,m2 */ for (clock.m1 = limit->m1.max; clock.m1 >= limit->m1.min; clock.m1--) { for (clock.m2 = limit->m2.max; clock.m2 >= limit->m2.min; clock.m2--) { for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { int this_err; intel_clock(dev, refclk, &clock); if (!intel_PLL_is_valid(dev, limit, &clock)) continue; if (match_clock && clock.p != match_clock->p) continue; this_err = abs(clock.dot - target); if (this_err < err_most) { *best_clock = clock; err_most = this_err; max_n = clock.n; found = true; } } } } } return found; } static bool intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { struct drm_device *dev = crtc->dev; intel_clock_t clock; if (target < 200000) { clock.n = 1; clock.p1 = 2; clock.p2 = 10; clock.m1 = 12; clock.m2 = 9; } else { clock.n = 2; clock.p1 = 1; clock.p2 = 10; clock.m1 = 14; clock.m2 = 8; } intel_clock(dev, refclk, &clock); memcpy(best_clock, &clock, sizeof(intel_clock_t)); return true; } /* DisplayPort has only two frequencies, 162MHz and 270MHz */ static bool intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { intel_clock_t clock; if (target < 200000) { clock.p1 = 2; clock.p2 = 10; clock.n = 2; clock.m1 = 23; clock.m2 = 8; } else { clock.p1 = 1; clock.p2 = 
10; clock.n = 1; clock.m1 = 14; clock.m2 = 2; } clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); clock.p = (clock.p1 * clock.p2); clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; clock.vco = 0; memcpy(best_clock, &clock, sizeof(intel_clock_t)); return true; } +static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 frame, frame_reg = PIPEFRAME(pipe); + + frame = I915_READ(frame_reg); + + if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) + DRM_DEBUG_KMS("vblank wait timed out\n"); +} + /** * intel_wait_for_vblank - wait for vblank on a given pipe * @dev: drm device * @pipe: pipe to wait for * * Wait for vblank to occur on a given pipe. Needed for various bits of * mode setting code. */ void intel_wait_for_vblank(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; int pipestat_reg = PIPESTAT(pipe); + if (INTEL_INFO(dev)->gen >= 5) { + ironlake_wait_for_vblank(dev, pipe); + return; + } + /* Clear existing vblank status. Note this will clear any other * sticky status fields as well. * * This races with i915_driver_irq_handler() with the result * that either function could miss a vblank event. Here it is not * fatal, as we will either wait upon the next vblank interrupt or * timeout. Generally speaking intel_wait_for_vblank() is only * called during modeset at which time the GPU should be idle and * should *not* be performing page flips and thus not waiting on * vblanks... * Currently, the result of us stealing a vblank from the irq * handler is that a single frame will be skipped during swapbuffers. */ I915_WRITE(pipestat_reg, I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); /* Wait for vblank interrupt bit to set */ if (_intel_wait_for(dev, I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS, 50, 1, "915vbl")) DRM_DEBUG_KMS("vblank wait timed out\n"); } /* * intel_wait_for_pipe_off - wait for pipe to turn off * @dev: drm device * @pipe: pipe to wait for * * After disabling a pipe, we can't wait for vblank in the usual way, * spinning on the vblank interrupt status bit, since we won't actually * see an interrupt when the pipe is disabled. * * On Gen4 and above: * wait for the pipe register state bit to turn off * * Otherwise: * wait for the display line value to settle (it usually * ends up stopping at the start of the next frame). * */ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 4) { int reg = PIPECONF(pipe); /* Wait for the Pipe State to go off */ if (_intel_wait_for(dev, (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100, 1, "915pip")) DRM_DEBUG_KMS("pipe_off wait timed out\n"); } else { - u32 last_line; + u32 last_line, line_mask; int reg = PIPEDSL(pipe); unsigned long timeout = jiffies + msecs_to_jiffies(100); + if (IS_GEN2(dev)) + line_mask = DSL_LINEMASK_GEN2; + else + line_mask = DSL_LINEMASK_GEN3; + /* Wait for the display line to settle */ do { - last_line = I915_READ(reg) & DSL_LINEMASK; + last_line = I915_READ(reg) & line_mask; DELAY(5000); - } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && + } while (((I915_READ(reg) & line_mask) != last_line) && time_after(timeout, jiffies)); if (time_after(jiffies, timeout)) DRM_DEBUG_KMS("pipe_off wait timed out\n"); } } static const char *state_string(bool enabled) { return enabled ? 
"on" : "off"; } /* Only for pre-ILK configs */ static void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; bool cur_state; reg = DPLL(pipe); val = I915_READ(reg); cur_state = !!(val & DPLL_VCO_ENABLE); if (cur_state != state) printf("PLL state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } #define assert_pll_enabled(d, p) assert_pll(d, p, true) #define assert_pll_disabled(d, p) assert_pll(d, p, false) /* For ILK+ */ static void assert_pch_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) + struct intel_crtc *intel_crtc, bool state) { int reg; u32 val; bool cur_state; + if (HAS_PCH_LPT(dev_priv->dev)) { + DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); + return; + } + + if (!intel_crtc->pch_pll) { + printf("asserting PCH PLL enabled with no PLL\n"); + return; + } + if (HAS_PCH_CPT(dev_priv->dev)) { u32 pch_dpll; pch_dpll = I915_READ(PCH_DPLL_SEL); /* Make sure the selected PLL is enabled to the transcoder */ - KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0, - ("transcoder %d PLL not enabled\n", pipe)); - - /* Convert the transcoder pipe number to a pll pipe number */ - pipe = (pch_dpll >> (4 * pipe)) & 1; + KASSERT(((pch_dpll >> (4 * intel_crtc->pipe)) & 8) != 0, + ("transcoder %d PLL not enabled\n", intel_crtc->pipe)); } - reg = PCH_DPLL(pipe); + reg = intel_crtc->pch_pll->pll_reg; val = I915_READ(reg); cur_state = !!(val & DPLL_VCO_ENABLE); if (cur_state != state) printf("PCH PLL state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; bool cur_state; - reg = FDI_TX_CTL(pipe); - val = I915_READ(reg); - cur_state = !!(val & FDI_TX_ENABLE); + if (IS_HASWELL(dev_priv->dev)) { + /* On Haswell, DDI is used instead of FDI_TX_CTL */ + reg = DDI_FUNC_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & PIPE_DDI_FUNC_ENABLE); + } else { + reg = FDI_TX_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & FDI_TX_ENABLE); + } if (cur_state != state) printf("FDI TX state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) static void assert_fdi_rx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; bool cur_state; - reg = FDI_RX_CTL(pipe); - val = I915_READ(reg); - cur_state = !!(val & FDI_RX_ENABLE); + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n"); + return; + } else { + reg = FDI_RX_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & FDI_RX_ENABLE); + } if (cur_state != state) printf("FDI RX state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; /* ILK FDI PLL is always enabled */ if (dev_priv->info->gen == 5) return; + /* On Haswell, DDI ports are responsible for the FDI PLL setup */ + if (IS_HASWELL(dev_priv->dev)) + return; + reg = 
FDI_TX_CTL(pipe); val = I915_READ(reg); if (!(val & FDI_TX_PLL_ENABLE)) printf("FDI TX PLL assertion failure, should be active but is disabled\n"); } static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n"); + return; + } reg = FDI_RX_CTL(pipe); val = I915_READ(reg); if (!(val & FDI_RX_PLL_ENABLE)) printf("FDI RX PLL assertion failure, should be active but is disabled\n"); } static void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { int pp_reg, lvds_reg; u32 val; enum pipe panel_pipe = PIPE_A; bool locked = true; if (HAS_PCH_SPLIT(dev_priv->dev)) { pp_reg = PCH_PP_CONTROL; lvds_reg = PCH_LVDS; } else { pp_reg = PP_CONTROL; lvds_reg = LVDS; } val = I915_READ(pp_reg); if (!(val & PANEL_POWER_ON) || ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) locked = false; if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) panel_pipe = PIPE_B; if (panel_pipe == pipe && locked) printf("panel assertion failure, pipe %c regs locked\n", pipe_name(pipe)); } void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; bool cur_state; /* if we need the pipe A quirk it must be always on */ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) state = true; reg = PIPECONF(pipe); val = I915_READ(reg); cur_state = !!(val & PIPECONF_ENABLE); if (cur_state != state) printf("pipe %c assertion failure (expected %s, current %s)\n", pipe_name(pipe), state_string(state), state_string(cur_state)); } static void assert_plane(struct drm_i915_private *dev_priv, enum plane plane, bool state) { int reg; u32 val; bool cur_state; reg = DSPCNTR(plane); val = I915_READ(reg); cur_state = !!(val & DISPLAY_PLANE_ENABLE); if (cur_state != state) printf("plane %c assertion failure, (expected %s, current %s)\n", plane_name(plane), state_string(state), state_string(cur_state)); } #define assert_plane_enabled(d, p) assert_plane(d, p, true) #define assert_plane_disabled(d, p) assert_plane(d, p, false) static void assert_planes_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg, i; u32 val; int cur_pipe; /* Planes are fixed to pipes on ILK+ */ if (HAS_PCH_SPLIT(dev_priv->dev)) { reg = DSPCNTR(pipe); val = I915_READ(reg); if ((val & DISPLAY_PLANE_ENABLE) != 0) printf("plane %c assertion failure, should be disabled but not\n", plane_name(pipe)); return; } /* Need to check both planes against the pipe */ for (i = 0; i < 2; i++) { reg = DSPCNTR(i); val = I915_READ(reg); cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> DISPPLANE_SEL_PIPE_SHIFT; if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe) printf("plane %c assertion failure, should be off on pipe %c but is still active\n", plane_name(i), pipe_name(pipe)); } } static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) { u32 val; bool enabled; + if (HAS_PCH_LPT(dev_priv->dev)) { + DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n"); + return; + } + val = I915_READ(PCH_DREF_CONTROL); enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | DREF_SUPERSPREAD_SOURCE_MASK)); if (!enabled) printf("PCH refclk assertion failure, should be active but is disabled\n"); } static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; bool enabled; reg = TRANSCONF(pipe); val = I915_READ(reg); enabled = !!(val & TRANS_ENABLE); if (enabled) printf("transcoder assertion 
failed, should be off on pipe %c but is still active\n", pipe_name(pipe)); } static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, u32 val) { if ((val & PORT_ENABLE) == 0) return false; if (HAS_PCH_CPT(dev_priv->dev)) { if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) return false; } else { if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) return false; } return true; } static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, u32 val) { if ((val & LVDS_PORT_EN) == 0) return false; if (HAS_PCH_CPT(dev_priv->dev)) { if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) return false; } else { if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) return false; } return true; } static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, u32 val) { if ((val & ADPA_DAC_ENABLE) == 0) return false; if (HAS_PCH_CPT(dev_priv->dev)) { if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) return false; } else { if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) return false; } return true; } static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, u32 port_sel, u32 val) { if ((val & DP_PORT_EN) == 0) return false; if (HAS_PCH_CPT(dev_priv->dev)) { u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) return false; } else { if ((val & DP_PIPE_MASK) != (pipe << 30)) return false; } return true; } static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 port_sel) { u32 val = I915_READ(reg); if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) printf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); } static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); if (hdmi_pipe_enabled(dev_priv, val, pipe)) printf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); } static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); reg = PCH_ADPA; val = I915_READ(reg); if (adpa_pipe_enabled(dev_priv, val, pipe)) printf("PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); reg = PCH_LVDS; val = I915_READ(reg); if (lvds_pipe_enabled(dev_priv, val, pipe)) printf("PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); } /** * intel_enable_pll - enable a PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to enable * * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to * make sure the PLL reg is writable first though, since the panel write * protect mechanism may be enabled. * * Note! This is for pre-ILK only. 
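 *
 * In the pre-ILK modeset path this is the first step of the usual
 * bring-up order; a minimal sketch (illustrative only, the real CRTC
 * enable hook does more work around these calls):
 *
 *	intel_enable_pll(dev_priv, pipe);
 *	intel_enable_pipe(dev_priv, pipe, false);
 *	intel_enable_plane(dev_priv, plane, pipe);
 *
 * The order matters: intel_enable_pipe() below asserts that the PLL is
 * already running, and intel_enable_plane() asserts that the pipe is.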
*/ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; /* No really, not for ILK+ */ KASSERT(dev_priv->info->gen < 5, ("Wrong device gen")); /* PLL is protected by panel, make sure we can write it */ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) assert_panel_unlocked(dev_priv, pipe); reg = DPLL(pipe); val = I915_READ(reg); val |= DPLL_VCO_ENABLE; /* We do this three times for luck */ I915_WRITE(reg, val); POSTING_READ(reg); DELAY(150); /* wait for warmup */ I915_WRITE(reg, val); POSTING_READ(reg); DELAY(150); /* wait for warmup */ I915_WRITE(reg, val); POSTING_READ(reg); DELAY(150); /* wait for warmup */ } /** * intel_disable_pll - disable a PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to disable * * Disable the PLL for @pipe, making sure the pipe is off first. * * Note! This is for pre-ILK only. */ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; /* Don't disable pipe A or pipe A PLLs if needed */ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) return; /* Make sure the pipe isn't still relying on us */ assert_pipe_disabled(dev_priv, pipe); reg = DPLL(pipe); val = I915_READ(reg); val &= ~DPLL_VCO_ENABLE; I915_WRITE(reg, val); POSTING_READ(reg); } +/* SBI access */ +static void +intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) +{ + + mtx_lock(&dev_priv->dpio_lock); + if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, + 100)) { + DRM_ERROR("timeout waiting for SBI to become ready\n"); + goto out_unlock; + } + + I915_WRITE(SBI_ADDR, + (reg << 16)); + I915_WRITE(SBI_DATA, + value); + I915_WRITE(SBI_CTL_STAT, + SBI_BUSY | + SBI_CTL_OP_CRWR); + + if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0, + 100)) { + DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); + goto out_unlock; + } + +out_unlock: + mtx_unlock(&dev_priv->dpio_lock); +} + +static u32 +intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) +{ + u32 value; + + value = 0; + mtx_lock(&dev_priv->dpio_lock); + if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, + 100)) { + DRM_ERROR("timeout waiting for SBI to become ready\n"); + goto out_unlock; + } + + I915_WRITE(SBI_ADDR, + (reg << 16)); + I915_WRITE(SBI_CTL_STAT, + SBI_BUSY | + SBI_CTL_OP_CRRD); + + if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0, + 100)) { + DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); + goto out_unlock; + } + + value = I915_READ(SBI_DATA); + +out_unlock: + mtx_unlock(&dev_priv->dpio_lock); + return value; +} + /** * intel_enable_pch_pll - enable PCH PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to enable * * The PCH PLL needs to be enabled before the PCH transcoder, since it * drives the transcoder clock. 
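 *
 * With the shared PCH PLL bookkeeping the enable/disable pair is
 * reference counted: the first intel_enable_pch_pll() call for a given
 * pll_reg actually sets DPLL_VCO_ENABLE and marks pll->on, while later
 * calls from CRTCs sharing the same PLL only bump pll->active and assert
 * that the hardware is still on.  intel_disable_pch_pll() mirrors this
 * and only clears DPLL_VCO_ENABLE once pll->active drops back to zero.
 * LPT has no PCH PLLs at all, which is why the related assertions above
 * simply return on that PCH.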
*/ -static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void intel_enable_pch_pll(struct intel_crtc *intel_crtc) { + struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + struct intel_pch_pll *pll; int reg; u32 val; - if (pipe > 1) + /* PCH PLLs only available on ILK, SNB and IVB */ + KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen")); + pll = intel_crtc->pch_pll; + if (pll == NULL) return; - /* PCH only available on ILK+ */ - KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen")); + if (pll->refcount == 0) { + DRM_DEBUG_KMS("pll->refcount == 0\n"); + return; + } + DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n", + pll->pll_reg, pll->active, pll->on, + intel_crtc->base.base.id); + /* PCH refclock must be enabled first */ assert_pch_refclk_enabled(dev_priv); - reg = PCH_DPLL(pipe); + if (pll->active++ && pll->on) { + assert_pch_pll_enabled(dev_priv, intel_crtc); + return; + } + + DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg); + + reg = pll->pll_reg; val = I915_READ(reg); val |= DPLL_VCO_ENABLE; I915_WRITE(reg, val); POSTING_READ(reg); DELAY(200); + + pll->on = true; } -static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) { + struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + struct intel_pch_pll *pll = intel_crtc->pch_pll; int reg; - u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL, - pll_sel = TRANSC_DPLL_ENABLE; + u32 val; - if (pipe > 1) - return; - /* PCH only available on ILK+ */ KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen")); + if (pll == NULL) + return; - /* Make sure transcoder isn't still depending on us */ - assert_transcoder_disabled(dev_priv, pipe); + if (pll->refcount == 0) { + DRM_DEBUG_KMS("pll->refcount == 0\n"); + return; + } - if (pipe == 0) - pll_sel |= TRANSC_DPLLA_SEL; - else if (pipe == 1) - pll_sel |= TRANSC_DPLLB_SEL; + DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? 
%d) for crtc %d\n", + pll->pll_reg, pll->active, pll->on, + intel_crtc->base.base.id); + if (pll->active == 0) { + DRM_DEBUG_KMS("pll->active == 0\n"); + assert_pch_pll_disabled(dev_priv, intel_crtc); + return; + } - if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel) + if (--pll->active) { + assert_pch_pll_enabled(dev_priv, intel_crtc); return; + } - reg = PCH_DPLL(pipe); + DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg); + + /* Make sure transcoder isn't still depending on us */ + assert_transcoder_disabled(dev_priv, intel_crtc->pipe); + + reg = pll->pll_reg; val = I915_READ(reg); val &= ~DPLL_VCO_ENABLE; I915_WRITE(reg, val); POSTING_READ(reg); DELAY(200); + + pll->on = false; } static void intel_enable_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val, pipeconf_val; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; /* PCH only available on ILK+ */ KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen")); /* Make sure PCH DPLL is enabled */ - assert_pch_pll_enabled(dev_priv, pipe); + assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc)); /* FDI must be feeding us bits for PCH ports */ assert_fdi_tx_enabled(dev_priv, pipe); assert_fdi_rx_enabled(dev_priv, pipe); - + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n"); + return; + } reg = TRANSCONF(pipe); val = I915_READ(reg); pipeconf_val = I915_READ(PIPECONF(pipe)); - if (HAS_PCH_IBX(dev_priv->dev)) { /* * make the BPC in transcoder be consistent with * that in pipeconf reg. */ val &= ~PIPE_BPC_MASK; val |= pipeconf_val & PIPE_BPC_MASK; } val &= ~TRANS_INTERLACE_MASK; if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) if (HAS_PCH_IBX(dev_priv->dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; else val |= TRANS_INTERLACED; else val |= TRANS_PROGRESSIVE; I915_WRITE(reg, val | TRANS_ENABLE); if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE, 100, 1, "915trc")) DRM_ERROR("failed to enable transcoder %d\n", pipe); } static void intel_disable_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); /* Ports must be off as well */ assert_pch_ports_disabled(dev_priv, pipe); reg = TRANSCONF(pipe); val = I915_READ(reg); val &= ~TRANS_ENABLE; I915_WRITE(reg, val); /* wait for PCH transcoder off, transcoder state */ if (_intel_wait_for(dev_priv->dev, (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50, 1, "915trd")) DRM_ERROR("failed to disable transcoder %d\n", pipe); } /** * intel_enable_pipe - enable a pipe, asserting requirements * @dev_priv: i915 private structure * @pipe: pipe to enable * @pch_port: on ILK+, is this pipe driving a PCH port or not * * Enable @pipe, making sure that various hardware specific requirements * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. * * @pipe should be %PIPE_A or %PIPE_B. * * Will wait until the pipe is actually running (i.e. first vblank) before * returning. */ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool pch_port) { int reg; u32 val; /* * A pipe without a PLL won't actually be able to drive bits from * a plane. On ILK+ the pipe PLLs are integrated, so we don't * need the check. 
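	 * For example, a CRTC driving LVDS or HDMI through the PCH passes
	 * pch_port == true, so both FDI PLL checks below apply; CPU eDP on
	 * DP_A is the usual exception and keeps pch_port false.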
*/ if (!HAS_PCH_SPLIT(dev_priv->dev)) assert_pll_enabled(dev_priv, pipe); else { if (pch_port) { /* if driving the PCH, we need FDI enabled */ assert_fdi_rx_pll_enabled(dev_priv, pipe); assert_fdi_tx_pll_enabled(dev_priv, pipe); } /* FIXME: assert CPU port conditions for SNB+ */ } reg = PIPECONF(pipe); val = I915_READ(reg); if (val & PIPECONF_ENABLE) return; I915_WRITE(reg, val | PIPECONF_ENABLE); intel_wait_for_vblank(dev_priv->dev, pipe); } /** * intel_disable_pipe - disable a pipe, asserting requirements * @dev_priv: i915 private structure * @pipe: pipe to disable * * Disable @pipe, making sure that various hardware specific requirements * are met, if applicable, e.g. plane disabled, panel fitter off, etc. * * @pipe should be %PIPE_A or %PIPE_B. * * Will wait until the pipe has shut down before returning. */ static void intel_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; /* * Make sure planes won't keep trying to pump pixels to us, * or we might hang the display. */ assert_planes_disabled(dev_priv, pipe); /* Don't disable pipe A or pipe A PLLs if needed */ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) return; reg = PIPECONF(pipe); val = I915_READ(reg); if ((val & PIPECONF_ENABLE) == 0) return; I915_WRITE(reg, val & ~PIPECONF_ENABLE); intel_wait_for_pipe_off(dev_priv->dev, pipe); } /* * Plane regs are double buffered, going from enabled->disabled needs a * trigger in order to latch. The display address reg provides this. */ -static void intel_flush_display_plane(struct drm_i915_private *dev_priv, +void intel_flush_display_plane(struct drm_i915_private *dev_priv, enum plane plane) { I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); } /** * intel_enable_plane - enable a display plane on a given pipe * @dev_priv: i915 private structure * @plane: plane to enable * @pipe: pipe being fed * * Enable @plane on @pipe, making sure that @pipe is running first. */ static void intel_enable_plane(struct drm_i915_private *dev_priv, enum plane plane, enum pipe pipe) { int reg; u32 val; /* If the pipe isn't enabled, we can't pump pixels and may hang */ assert_pipe_enabled(dev_priv, pipe); reg = DSPCNTR(plane); val = I915_READ(reg); if (val & DISPLAY_PLANE_ENABLE) return; I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); intel_flush_display_plane(dev_priv, plane); intel_wait_for_vblank(dev_priv->dev, pipe); } /** * intel_disable_plane - disable a display plane * @dev_priv: i915 private structure * @plane: plane to disable * @pipe: pipe consuming the data * * Disable @plane; should be an independent operation. 
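 *
 * Teardown runs in the opposite order of the enable path sketched above;
 * illustratively:
 *
 *	intel_disable_plane(dev_priv, plane, pipe);
 *	intel_disable_pipe(dev_priv, pipe);
 *	intel_disable_pll(dev_priv, pipe);
 *
 * intel_disable_pipe() asserts that the planes feeding the pipe are
 * already off, and intel_disable_pll() asserts that the pipe itself is.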
*/ static void intel_disable_plane(struct drm_i915_private *dev_priv, enum plane plane, enum pipe pipe) { int reg; u32 val; reg = DSPCNTR(plane); val = I915_READ(reg); if ((val & DISPLAY_PLANE_ENABLE) == 0) return; I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); intel_flush_display_plane(dev_priv, plane); intel_wait_for_vblank(dev_priv->dev, pipe); } static void disable_pch_dp(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 port_sel) { u32 val = I915_READ(reg); if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~DP_PORT_EN); } } static void disable_pch_hdmi(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); if (hdmi_pipe_enabled(dev_priv, val, pipe)) { DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~PORT_ENABLE); } } /* Disable any ports connected to this transcoder */ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, enum pipe pipe) { u32 reg, val; val = I915_READ(PCH_PP_CONTROL); I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); reg = PCH_ADPA; val = I915_READ(reg); if (adpa_pipe_enabled(dev_priv, val, pipe)) I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); reg = PCH_LVDS; val = I915_READ(reg); if (lvds_pipe_enabled(dev_priv, val, pipe)) { DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); I915_WRITE(reg, val & ~LVDS_PORT_EN); POSTING_READ(reg); DELAY(100); } disable_pch_hdmi(dev_priv, pipe, HDMIB); disable_pch_hdmi(dev_priv, pipe, HDMIC); disable_pch_hdmi(dev_priv, pipe, HDMID); } -static void i8xx_disable_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 fbc_ctl; - - /* Disable compression */ - fbc_ctl = I915_READ(FBC_CONTROL); - if ((fbc_ctl & FBC_CTL_EN) == 0) - return; - - fbc_ctl &= ~FBC_CTL_EN; - I915_WRITE(FBC_CONTROL, fbc_ctl); - - /* Wait for compressing bit to clear */ - if (_intel_wait_for(dev, - (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, - 1, "915fbd")) { - DRM_DEBUG_KMS("FBC idle timed out\n"); - return; - } - - DRM_DEBUG_KMS("disabled FBC\n"); -} - -static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int cfb_pitch; - int plane, i; - u32 fbc_ctl, fbc_ctl2; - - cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; - if (fb->pitches[0] < cfb_pitch) - cfb_pitch = fb->pitches[0]; - - /* FBC_CTL wants 64B units */ - cfb_pitch = (cfb_pitch / 64) - 1; - plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; - - /* Clear old tags */ - for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) - I915_WRITE(FBC_TAG + (i * 4), 0); - - /* Set it up... */ - fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; - fbc_ctl2 |= plane; - I915_WRITE(FBC_CONTROL2, fbc_ctl2); - I915_WRITE(FBC_FENCE_OFF, crtc->y); - - /* enable it... 
*/ - fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; - if (IS_I945GM(dev)) - fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ - fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; - fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; - fbc_ctl |= obj->fence_reg; - I915_WRITE(FBC_CONTROL, fbc_ctl); - - DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", - cfb_pitch, crtc->y, intel_crtc->plane); -} - -static bool i8xx_fbc_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - return I915_READ(FBC_CONTROL) & FBC_CTL_EN; -} - -static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; - unsigned long stall_watermark = 200; - u32 dpfc_ctl; - - dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; - dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; - I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); - - I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | - (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | - (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); - I915_WRITE(DPFC_FENCE_YOFF, crtc->y); - - /* enable it... */ - I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); - - DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); -} - -static void g4x_disable_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpfc_ctl; - - /* Disable compression */ - dpfc_ctl = I915_READ(DPFC_CONTROL); - if (dpfc_ctl & DPFC_CTL_EN) { - dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(DPFC_CONTROL, dpfc_ctl); - - DRM_DEBUG_KMS("disabled FBC\n"); - } -} - -static bool g4x_fbc_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; -} - -static void sandybridge_blit_fbc_update(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 blt_ecoskpd; - - /* Make sure blitter notifies FBC of writes */ - gen6_gt_force_wake_get(dev_priv); - blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); - blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << - GEN6_BLITTER_LOCK_SHIFT; - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << - GEN6_BLITTER_LOCK_SHIFT); - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - POSTING_READ(GEN6_BLITTER_ECOSKPD); - gen6_gt_force_wake_put(dev_priv); -} - -static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; - unsigned long stall_watermark = 200; - u32 dpfc_ctl; - - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); - dpfc_ctl &= DPFC_RESERVED; - dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); - /* Set persistent mode for front-buffer rendering, ala X. 
*/ - dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; - dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); - I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); - - I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | - (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | - (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); - I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); - /* enable it... */ - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); - - if (IS_GEN6(dev)) { - I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | obj->fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); - sandybridge_blit_fbc_update(dev); - } - - DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); -} - -static void ironlake_disable_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpfc_ctl; - - /* Disable compression */ - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); - if (dpfc_ctl & DPFC_CTL_EN) { - dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); - - DRM_DEBUG_KMS("disabled FBC\n"); - } -} - -static bool ironlake_fbc_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; -} - -bool intel_fbc_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev_priv->display.fbc_enabled) - return false; - - return dev_priv->display.fbc_enabled(dev); -} - -static void intel_fbc_work_fn(void *arg, int pending) -{ - struct intel_fbc_work *work = arg; - struct drm_device *dev = work->crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - DRM_LOCK(dev); - if (work == dev_priv->fbc_work) { - /* Double check that we haven't switched fb without cancelling - * the prior work. - */ - if (work->crtc->fb == work->fb) { - dev_priv->display.enable_fbc(work->crtc, - work->interval); - - dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; - dev_priv->cfb_fb = work->crtc->fb->base.id; - dev_priv->cfb_y = work->crtc->y; - } - - dev_priv->fbc_work = NULL; - } - DRM_UNLOCK(dev); - - free(work, DRM_MEM_KMS); -} - -static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) -{ - u_int pending; - - if (dev_priv->fbc_work == NULL) - return; - - DRM_DEBUG_KMS("cancelling pending FBC enable\n"); - - /* Synchronisation is provided by struct_mutex and checking of - * dev_priv->fbc_work, so we can perform the cancellation - * entirely asynchronously. - */ - if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task, - &pending) == 0) - /* tasklet was killed before being run, clean up */ - free(dev_priv->fbc_work, DRM_MEM_KMS); - - /* Mark the work as no longer wanted so that if it does - * wake-up (because the work was already running and waiting - * for our mutex), it will discover that is no longer - * necessary to run. 
- */ - dev_priv->fbc_work = NULL; -} - -static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -{ - struct intel_fbc_work *work; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev_priv->display.enable_fbc) - return; - - intel_cancel_fbc_work(dev_priv); - - work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO); - work->crtc = crtc; - work->fb = crtc->fb; - work->interval = interval; - TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn, - work); - - dev_priv->fbc_work = work; - - DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); - - /* Delay the actual enabling to let pageflipping cease and the - * display to settle before starting the compression. Note that - * this delay also serves a second purpose: it allows for a - * vblank to pass after disabling the FBC before we attempt - * to modify the control registers. - * - * A more complicated solution would involve tracking vblanks - * following the termination of the page-flipping sequence - * and indeed performing the enable as a co-routine and not - * waiting synchronously upon the vblank. - */ - taskqueue_enqueue_timeout(dev_priv->tq, &work->task, - msecs_to_jiffies(50)); -} - -void intel_disable_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - intel_cancel_fbc_work(dev_priv); - - if (!dev_priv->display.disable_fbc) - return; - - dev_priv->display.disable_fbc(dev); - dev_priv->cfb_plane = -1; -} - -/** - * intel_update_fbc - enable/disable FBC as needed - * @dev: the drm_device - * - * Set up the framebuffer compression hardware at mode set time. We - * enable it if possible: - * - plane A only (on pre-965) - * - no pixel mulitply/line duplication - * - no alpha buffer discard - * - no dual wide - * - framebuffer <= 2048 in width, 1536 in height - * - * We can't assume that any compression will take place (worst case), - * so the compressed buffer has to be the same size as the uncompressed - * one. It also must reside (along with the line length buffer) in - * stolen memory. - * - * We need to enable/disable FBC on a global basis. - */ -static void intel_update_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = NULL, *tmp_crtc; - struct intel_crtc *intel_crtc; - struct drm_framebuffer *fb; - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj; - int enable_fbc; - - DRM_DEBUG_KMS("\n"); - - if (!i915_powersave) - return; - - if (!I915_HAS_FBC(dev)) - return; - - /* - * If FBC is already on, we just have to verify that we can - * keep it that way... - * Need to disable if: - * - more than one pipe is active - * - changing FBC params (stride, fence, mode) - * - new fb is too large to fit in compressed buffer - * - going to an unsupported config (interlace, pixel multiply, etc.) 
- */ - list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { - if (tmp_crtc->enabled && tmp_crtc->fb) { - if (crtc) { - DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; - goto out_disable; - } - crtc = tmp_crtc; - } - } - - if (!crtc || crtc->fb == NULL) { - DRM_DEBUG_KMS("no output, disabling\n"); - dev_priv->no_fbc_reason = FBC_NO_OUTPUT; - goto out_disable; - } - - intel_crtc = to_intel_crtc(crtc); - fb = crtc->fb; - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - - enable_fbc = i915_enable_fbc; - if (enable_fbc < 0) { - DRM_DEBUG_KMS("fbc set to per-chip default\n"); - enable_fbc = 1; - if (INTEL_INFO(dev)->gen <= 6) - enable_fbc = 0; - } - if (!enable_fbc) { - DRM_DEBUG_KMS("fbc disabled per module param\n"); - dev_priv->no_fbc_reason = FBC_MODULE_PARAM; - goto out_disable; - } - if (intel_fb->obj->base.size > dev_priv->cfb_size) { - DRM_DEBUG_KMS("framebuffer too large, disabling " - "compression\n"); - dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; - goto out_disable; - } - if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || - (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { - DRM_DEBUG_KMS("mode incompatible with compression, " - "disabling\n"); - dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; - goto out_disable; - } - if ((crtc->mode.hdisplay > 2048) || - (crtc->mode.vdisplay > 1536)) { - DRM_DEBUG_KMS("mode too large for compression, disabling\n"); - dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; - goto out_disable; - } - if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { - DRM_DEBUG_KMS("plane not 0, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_BAD_PLANE; - goto out_disable; - } - if (obj->tiling_mode != I915_TILING_X || - obj->fence_reg == I915_FENCE_REG_NONE) { - DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_NOT_TILED; - goto out_disable; - } - - /* If the kernel debugger is active, always disable compression */ - if (kdb_active) - goto out_disable; - - /* If the scanout has not changed, don't modify the FBC settings. - * Note that we make the fundamental assumption that the fb->obj - * cannot be unpinned (and have its GTT offset and fence revoked) - * without first being decoupled from the scanout and FBC disabled. - */ - if (dev_priv->cfb_plane == intel_crtc->plane && - dev_priv->cfb_fb == fb->base.id && - dev_priv->cfb_y == crtc->y) - return; - - if (intel_fbc_enabled(dev)) { - /* We update FBC along two paths, after changing fb/crtc - * configuration (modeswitching) and after page-flipping - * finishes. For the latter, we know that not only did - * we disable the FBC at the start of the page-flip - * sequence, but also more than one vblank has passed. - * - * For the former case of modeswitching, it is possible - * to switch between two FBC valid configurations - * instantaneously so we do need to disable the FBC - * before we can modify its control registers. We also - * have to wait for the next vblank for that to take - * effect. However, since we delay enabling FBC we can - * assume that a vblank has passed since disabling and - * that we can safely alter the registers in the deferred - * callback. - * - * In the scenario that we go from a valid to invalid - * and then back to valid FBC configuration we have - * no strict enforcement that a vblank occurred since - * disabling the FBC. However, along all current pipe - * disabling paths we do need to wait for a vblank at - * some point. 
And we wait before enabling FBC anyway. - */ - DRM_DEBUG_KMS("disabling active FBC for update\n"); - intel_disable_fbc(dev); - } - - intel_enable_fbc(crtc, 500); - return; - -out_disable: - /* Multiple disables should be harmless */ - if (intel_fbc_enabled(dev)) { - DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); - intel_disable_fbc(dev); - } -} - int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, struct intel_ring_buffer *pipelined) { struct drm_i915_private *dev_priv = dev->dev_private; u32 alignment; int ret; alignment = 0; /* shut gcc */ switch (obj->tiling_mode) { case I915_TILING_NONE: if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) alignment = 128 * 1024; else if (INTEL_INFO(dev)->gen >= 4) alignment = 4 * 1024; else alignment = 64 * 1024; break; case I915_TILING_X: /* pin() will align the object as required by fence */ alignment = 0; break; case I915_TILING_Y: /* FIXME: Is this true? */ DRM_ERROR("Y tiled not allowed for scan out buffers\n"); return -EINVAL; default: KASSERT(0, ("Wrong tiling for fb obj")); } dev_priv->mm.interruptible = false; ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); if (ret) goto err_interruptible; /* Install a fence for tiled scan-out. Pre-i965 always needs a * fence, whereas 965+ only requires a fence if using * framebuffer compression. For simplicity, we always install * a fence as the cost is not that onerous. */ - if (obj->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence(obj, pipelined); - if (ret) - goto err_unpin; + ret = i915_gem_object_get_fence(obj); + if (ret) + goto err_unpin; - i915_gem_object_pin_fence(obj); - } + i915_gem_object_pin_fence(obj); dev_priv->mm.interruptible = true; return 0; err_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); err_interruptible: dev_priv->mm.interruptible = true; return ret; } void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) { i915_gem_object_unpin_fence(obj); - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); } static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; unsigned long Start, Offset; u32 dspcntr; u32 reg; switch (plane) { case 0: case 1: break; default: DRM_ERROR("Can't update plane %d in SAREA\n", plane); return -EINVAL; } intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; reg = DSPCNTR(plane); dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (fb->bits_per_pixel) { case 8: dspcntr |= DISPPLANE_8BPP; break; case 16: if (fb->depth == 15) dspcntr |= DISPPLANE_15_16BPP; else dspcntr |= DISPPLANE_16BPP; break; case 24: case 32: dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break; default: DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); return -EINVAL; } if (INTEL_INFO(dev)->gen >= 4) { if (obj->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; } I915_WRITE(reg, dspcntr); Start = obj->gtt_offset; Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", Start, Offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { - 
I915_WRITE(DSPSURF(plane), Start); + I915_MODIFY_DISPBASE(DSPSURF(plane), Start); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPADDR(plane), Offset); } else I915_WRITE(DSPADDR(plane), Start + Offset); POSTING_READ(reg); return (0); } static int ironlake_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; unsigned long Start, Offset; u32 dspcntr; u32 reg; switch (plane) { case 0: case 1: case 2: break; default: DRM_ERROR("Can't update plane %d in SAREA\n", plane); return -EINVAL; } intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; reg = DSPCNTR(plane); dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (fb->bits_per_pixel) { case 8: dspcntr |= DISPPLANE_8BPP; break; case 16: if (fb->depth != 16) { DRM_ERROR("bpp 16, depth %d\n", fb->depth); return -EINVAL; } dspcntr |= DISPPLANE_16BPP; break; case 24: case 32: if (fb->depth == 24) dspcntr |= DISPPLANE_32BPP_NO_ALPHA; else if (fb->depth == 30) dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; else { DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel, fb->depth); return -EINVAL; } break; default: DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); return -EINVAL; } if (obj->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; /* must disable */ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; I915_WRITE(reg, dspcntr); Start = obj->gtt_offset; Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", Start, Offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); - I915_WRITE(DSPSURF(plane), Start); + I915_MODIFY_DISPBASE(DSPSURF(plane), Start); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPADDR(plane), Offset); POSTING_READ(reg); return 0; } /* Assume fb object is pinned & idle & fenced and just update base pointers */ static int intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - ret = dev_priv->display.update_plane(crtc, fb, x, y); - if (ret) - return ret; - - intel_update_fbc(dev); + if (dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); intel_increase_pllclock(crtc); - return 0; + return dev_priv->display.update_plane(crtc, fb, x, y); } static int intel_finish_fb(struct drm_framebuffer *old_fb) { struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; bool was_interruptible = dev_priv->mm.interruptible; int ret; mtx_lock(&dev->event_lock); while (!atomic_load_acq_int(&dev_priv->mm.wedged) && atomic_load_acq_int(&obj->pending_flip) != 0) { msleep(&obj->pending_flip, &dev->event_lock, 0, "915flp", 0); } mtx_unlock(&dev->event_lock); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the * current scanout is retired before unpinning the old * framebuffer. * * This should only fail upon a hung GPU, in which case we * can safely continue. 
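	 * Both intel_pipe_set_base() and intel_crtc_wait_for_pending_flips()
	 * funnel through this helper, so the wait happens before the old
	 * framebuffer object is unpinned or reused.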
*/ dev_priv->mm.interruptible = false; ret = i915_gem_object_finish_gpu(obj); dev_priv->mm.interruptible = was_interruptible; return ret; } static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; #if 0 struct drm_i915_master_private *master_priv; -#else - drm_i915_private_t *dev_priv = dev->dev_private; #endif struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int ret; /* no fb bound */ if (!crtc->fb) { DRM_ERROR("No FB bound\n"); return 0; } - switch (intel_crtc->plane) { - case 0: - case 1: - break; - case 2: - if (IS_IVYBRIDGE(dev)) - break; - /* fall through otherwise */ - default: - DRM_ERROR("no plane for crtc\n"); + if(intel_crtc->plane > dev_priv->num_pipe) { + DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n", + intel_crtc->plane, + dev_priv->num_pipe); return -EINVAL; } DRM_LOCK(dev); ret = intel_pin_and_fence_fb_obj(dev, to_intel_framebuffer(crtc->fb)->obj, NULL); if (ret != 0) { DRM_UNLOCK(dev); DRM_ERROR("pin & fence failed\n"); return ret; } if (old_fb) intel_finish_fb(old_fb); - ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, - LEAVE_ATOMIC_MODE_SET); + ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y); if (ret) { intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); DRM_UNLOCK(dev); DRM_ERROR("failed to update base address\n"); return ret; } if (old_fb) { intel_wait_for_vblank(dev, intel_crtc->pipe); intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); } + intel_update_fbc(dev); DRM_UNLOCK(dev); #if 0 if (!dev->primary->master) return 0; master_priv = dev->primary->master->driver_priv; if (!master_priv->sarea_priv) return 0; if (intel_crtc->pipe) { master_priv->sarea_priv->pipeB_x = x; master_priv->sarea_priv->pipeB_y = y; } else { master_priv->sarea_priv->pipeA_x = x; master_priv->sarea_priv->pipeA_y = y; } #else if (!dev_priv->sarea_priv) return 0; if (intel_crtc->pipe) { dev_priv->sarea_priv->planeB_x = x; dev_priv->sarea_priv->planeB_y = y; } else { dev_priv->sarea_priv->planeA_x = x; dev_priv->sarea_priv->planeA_y = y; } #endif return 0; } static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); dpa_ctl = I915_READ(DP_A); dpa_ctl &= ~DP_PLL_FREQ_MASK; if (clock < 200000) { u32 temp; dpa_ctl |= DP_PLL_FREQ_160MHZ; /* workaround for 160Mhz: 1) program 0x4600c bits 15:0 = 0x8124 2) program 0x46010 bit 0 = 1 3) program 0x46034 bit 24 = 1 4) program 0x64000 bit 14 = 1 */ temp = I915_READ(0x4600c); temp &= 0xffff0000; I915_WRITE(0x4600c, temp | 0x8124); temp = I915_READ(0x46010); I915_WRITE(0x46010, temp | 1); temp = I915_READ(0x46034); I915_WRITE(0x46034, temp | (1 << 24)); } else { dpa_ctl |= DP_PLL_FREQ_270MHZ; } I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); DELAY(500); } static void intel_fdi_normal_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp; /* enable normal train */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); if (IS_IVYBRIDGE(dev)) { temp &= ~FDI_LINK_TRAIN_NONE_IVB; temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; } I915_WRITE(reg, temp); reg = 
FDI_RX_CTL(pipe); temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_NORMAL_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_NONE; } I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); /* wait one idle pattern time */ POSTING_READ(reg); DELAY(1000); /* IVB wants error correction enabled */ if (IS_IVYBRIDGE(dev)) I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); } static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; u32 flags = I915_READ(SOUTH_CHICKEN1); flags |= FDI_PHASE_SYNC_OVR(pipe); I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ flags |= FDI_PHASE_SYNC_EN(pipe); I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ POSTING_READ(SOUTH_CHICKEN1); } /* The FDI link training functions for ILK/Ibexpeak. */ static void ironlake_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 reg, temp, tries; /* FDI needs bits from pipe & plane first */ assert_pipe_enabled(dev_priv, pipe); assert_plane_enabled(dev_priv, plane); /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); temp = I915_READ(reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; I915_WRITE(reg, temp); I915_READ(reg); DELAY(150); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~(7 << 19); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; I915_WRITE(reg, temp | FDI_TX_ENABLE); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; I915_WRITE(reg, temp | FDI_RX_ENABLE); POSTING_READ(reg); DELAY(150); /* Ironlake workaround, enable clock pointer after FDI enable*/ if (HAS_PCH_IBX(dev)) { I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN); } reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if ((temp & FDI_RX_BIT_LOCK)) { DRM_DEBUG_KMS("FDI train 1 done.\n"); I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); break; } } if (tries == 5) DRM_ERROR("FDI train 1 fail!\n"); /* Train 2 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; I915_WRITE(reg, temp); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(150); reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } if (tries == 5) DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done\n"); } static const int snb_b_fdi_train_param[] = { FDI_LINK_TRAIN_400MV_0DB_SNB_B, FDI_LINK_TRAIN_400MV_6DB_SNB_B, FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, FDI_LINK_TRAIN_800MV_0DB_SNB_B, }; /* The FDI link training functions for SNB/Cougarpoint. 
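 *
 * In outline the sequence below is: drive training pattern 1 and step
 * through the four snb_b_fdi_train_param[] voltage/pre-emphasis values,
 * polling FDI_RX_IIR (up to five times per step) for FDI_RX_BIT_LOCK;
 * then switch to pattern 2 and repeat the walk until FDI_RX_SYMBOL_LOCK
 * is seen.  intel_fdi_normal_train() later moves the link to the normal
 * (idle) pattern.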
*/ static void gen6_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp, i; + u32 reg, temp, i, retry; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); temp = I915_READ(reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(150); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~(7 << 19); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; I915_WRITE(reg, temp | FDI_TX_ENABLE); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } I915_WRITE(reg, temp | FDI_RX_ENABLE); POSTING_READ(reg); DELAY(150); if (HAS_PCH_CPT(dev)) cpt_phase_pointer_enable(dev, pipe); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(500); - reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + for (retry = 0; retry < 5; retry++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - if (temp & FDI_RX_BIT_LOCK) { - I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("FDI train 1 done.\n"); - break; + if (temp & FDI_RX_BIT_LOCK) { + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); + DRM_DEBUG_KMS("FDI train 1 done.\n"); + break; + } + DELAY(50); } + if (retry < 5) + break; } if (i == 4) DRM_ERROR("FDI train 1 fail!\n"); /* Train 2 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; if (IS_GEN6(dev)) { temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; } I915_WRITE(reg, temp); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; } I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(150); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(500); - reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + for (retry = 0; retry < 5; retry++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done.\n"); - break; + if (temp & FDI_RX_SYMBOL_LOCK) { + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done.\n"); + break; + } + DELAY(50); } + if (retry < 5) + break; } if (i == 4) DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done.\n"); } /* Manual link training for Ivy Bridge A0 parts */ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; 
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp, i; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); temp = I915_READ(reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(150); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~(7 << 19); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; temp |= FDI_COMPOSITE_SYNC; I915_WRITE(reg, temp | FDI_TX_ENABLE); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_AUTO; temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; temp |= FDI_COMPOSITE_SYNC; I915_WRITE(reg, temp | FDI_RX_ENABLE); POSTING_READ(reg); DELAY(150); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(500); reg = FDI_RX_IIR(pipe); temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_BIT_LOCK || (I915_READ(reg) & FDI_RX_BIT_LOCK)) { I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); DRM_DEBUG_KMS("FDI train 1 done.\n"); break; } } if (i == 4) DRM_ERROR("FDI train 1 fail!\n"); /* Train 2 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE_IVB; temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; I915_WRITE(reg, temp); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(150); for (i = 0; i < 4; i++ ) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(500); reg = FDI_RX_IIR(pipe); temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } if (i == 4) DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done.\n"); } static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp; /* Write the TU size bits so error detection works */ I915_WRITE(FDI_RX_TUSIZE1(pipe), I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~((0x7 << 19) | (0x7 << 16)); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); POSTING_READ(reg); DELAY(200); /* Switch from Rawclk to PCDclk */ temp = I915_READ(reg); I915_WRITE(reg, temp | FDI_PCDCLK); POSTING_READ(reg); DELAY(200); - /* Enable CPU FDI TX PLL, always on for Ironlake */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); + /* On Haswell, the PLL configuration for ports and pipes is handled + * separately, as part of DDI setup */ + if (!IS_HASWELL(dev)) { + /* 
Enable CPU FDI TX PLL, always on for Ironlake */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); - POSTING_READ(reg); - DELAY(100); - } + POSTING_READ(reg); + DELAY(100); + } + } } static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; u32 flags = I915_READ(SOUTH_CHICKEN1); flags &= ~(FDI_PHASE_SYNC_EN(pipe)); I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */ flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ POSTING_READ(SOUTH_CHICKEN1); } static void ironlake_fdi_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp; /* disable CPU FDI tx and PCH FDI rx */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); I915_WRITE(reg, temp & ~FDI_TX_ENABLE); POSTING_READ(reg); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~(0x7 << 16); temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; I915_WRITE(reg, temp & ~FDI_RX_ENABLE); POSTING_READ(reg); DELAY(100); /* Ironlake workaround, disable clock pointer after downing FDI */ if (HAS_PCH_IBX(dev)) { I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); I915_WRITE(FDI_RX_CHICKEN(pipe), I915_READ(FDI_RX_CHICKEN(pipe) & ~FDI_RX_PHASE_SYNC_POINTER_EN)); } else if (HAS_PCH_CPT(dev)) { cpt_phase_pointer_disable(dev, pipe); } /* still set train pattern 1 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; I915_WRITE(reg, temp); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } /* BPC in FDI rx is consistent with that in PIPECONF */ temp &= ~(0x07 << 16); temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; I915_WRITE(reg, temp); POSTING_READ(reg); DELAY(100); } -/* - * When we disable a pipe, we need to clear any pending scanline wait events - * to avoid hanging the ring, which we assume we are waiting on. 
- */ -static void intel_clear_scanline_wait(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; - u32 tmp; - - if (IS_GEN2(dev)) - /* Can't break the hang on i8xx */ - return; - - ring = LP_RING(dev_priv); - tmp = I915_READ_CTL(ring); - if (tmp & RING_WAIT) - I915_WRITE_CTL(ring, tmp); -} - static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { - struct drm_i915_gem_object *obj; - struct drm_i915_private *dev_priv; - struct drm_device *dev; + struct drm_device *dev = crtc->dev; if (crtc->fb == NULL) return; - obj = to_intel_framebuffer(crtc->fb)->obj; - dev = crtc->dev; - dev_priv = dev->dev_private; - mtx_lock(&dev->event_lock); - while (atomic_load_acq_int(&obj->pending_flip) != 0) - msleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0); - mtx_unlock(&dev->event_lock); + DRM_LOCK(dev); + intel_finish_fb(crtc->fb); + DRM_UNLOCK(dev); } static bool intel_crtc_driving_pch(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; /* * If there's a non-PCH eDP on this crtc, it must be DP_A, and that * must be driven by its own crtc; no sharing is possible. */ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { if (encoder->base.crtc != crtc) continue; + /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell + * CPU handles all others */ + if (IS_HASWELL(dev)) { + /* It is still unclear how this will work on PPT, so throw up a warning */ + if (!HAS_PCH_LPT(dev)) + DRM_DEBUG_KMS("Haswell: PPT\n"); + + if (encoder->type == DRM_MODE_ENCODER_DAC) { + DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); + return true; + } else { + DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", + encoder->type); + return false; + } + } + switch (encoder->type) { case INTEL_OUTPUT_EDP: if (!intel_encoder_is_pch_edp(&encoder->base)) return false; continue; } } return true; } +/* Program iCLKIP clock to the desired frequency */ +static void lpt_program_iclkip(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 divsel, phaseinc, auxdiv, phasedir = 0; + u32 temp; + + /* It is necessary to ungate the pixclk gate prior to programming + * the divisors, and gate it back when it is done. + */ + I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); + + /* Disable SSCCTL */ + intel_sbi_write(dev_priv, SBI_SSCCTL6, + intel_sbi_read(dev_priv, SBI_SSCCTL6) | + SBI_SSCCTL_DISABLE); + + /* 20MHz is a corner case which is out of range for the 7-bit divisor */ + if (crtc->mode.clock == 20000) { + auxdiv = 1; + divsel = 0x41; + phaseinc = 0x20; + } else { + /* The iCLK virtual clock root frequency is in MHz, + * but the crtc->mode.clock in in KHz. To get the divisors, + * it is necessary to divide one by another, so we + * convert the virtual clock precision to KHz here for higher + * precision. 
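As a concrete check of the divisor math described above, the fragment below reruns the same arithmetic for a hypothetical 108000 kHz mode clock. The 172800 MHz virtual root (expressed in kHz) and the 64-step phase-increment range come from the code; the mode clock is only an example.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t iclk_virtual_root_freq = 172800 * 1000;	/* kHz */
	uint32_t iclk_pi_range = 64;
	uint32_t clock = 108000;			/* assumed mode clock, kHz */
	uint32_t desired_divisor, msb_divisor_value, pi_value;

	desired_divisor = iclk_virtual_root_freq / clock;	/* 1600 */
	msb_divisor_value = desired_divisor / iclk_pi_range;	/* 25 */
	pi_value = desired_divisor % iclk_pi_range;		/* 0 */

	/* divsel = msb - 2, phaseinc = pi, auxdiv = 0 for the normal case */
	printf("divsel=%u phaseinc=%u\n", msb_divisor_value - 2, pi_value);
	return (0);
}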
+ */ + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor, msb_divisor_value, pi_value; + + desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock); + msb_divisor_value = desired_divisor / iclk_pi_range; + pi_value = desired_divisor % iclk_pi_range; + + auxdiv = 0; + divsel = msb_divisor_value - 2; + phaseinc = pi_value; + } + + /* This should not happen with any sane values */ + if ((SBI_SSCDIVINTPHASE_DIVSEL(divsel) & + ~SBI_SSCDIVINTPHASE_DIVSEL_MASK)) + DRM_DEBUG_KMS("DIVSEL_MASK"); + if ((SBI_SSCDIVINTPHASE_DIR(phasedir) & + ~SBI_SSCDIVINTPHASE_INCVAL_MASK)) + DRM_DEBUG_KMS("INCVAL_MASK"); + + DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", + crtc->mode.clock, + auxdiv, + divsel, + phasedir, + phaseinc); + + /* Program SSCDIVINTPHASE6 */ + temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6); + temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; + temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); + temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; + temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); + temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); + temp |= SBI_SSCDIVINTPHASE_PROPAGATE; + + intel_sbi_write(dev_priv, + SBI_SSCDIVINTPHASE6, + temp); + + /* Program SSCAUXDIV */ + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6); + temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); + temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); + intel_sbi_write(dev_priv, + SBI_SSCAUXDIV6, + temp); + + + /* Enable modulator and associated divider */ + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6); + temp &= ~SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, + SBI_SSCCTL6, + temp); + + /* Wait for initialization time */ + DELAY(24); + + I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); +} + /* * Enable PCH resources required for PCH ports: * - PCH PLLs * - FDI training & RX/TX * - update transcoder timings * - DP transcoding bits * - transcoder */ static void ironlake_pch_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp, transc_sel; + u32 reg, temp; + assert_transcoder_disabled(dev_priv, pipe); + /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc); - intel_enable_pch_pll(dev_priv, pipe); + intel_enable_pch_pll(intel_crtc); - if (HAS_PCH_CPT(dev)) { - transc_sel = intel_crtc->use_pll_a ? 
TRANSC_DPLLA_SEL : - TRANSC_DPLLB_SEL; + if (HAS_PCH_LPT(dev)) { + DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n"); + lpt_program_iclkip(crtc); + } else if (HAS_PCH_CPT(dev)) { + u32 sel; - /* Be sure PCH DPLL SEL is set */ temp = I915_READ(PCH_DPLL_SEL); - if (pipe == 0) { - temp &= ~(TRANSA_DPLLB_SEL); - temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); - } else if (pipe == 1) { - temp &= ~(TRANSB_DPLLB_SEL); - temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); - } else if (pipe == 2) { - temp &= ~(TRANSC_DPLLB_SEL); - temp |= (TRANSC_DPLL_ENABLE | transc_sel); + switch (pipe) { + default: + case 0: + temp |= TRANSA_DPLL_ENABLE; + sel = TRANSA_DPLLB_SEL; + break; + case 1: + temp |= TRANSB_DPLL_ENABLE; + sel = TRANSB_DPLLB_SEL; + break; + case 2: + temp |= TRANSC_DPLL_ENABLE; + sel = TRANSC_DPLLB_SEL; + break; } + if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B) + temp |= sel; + else + temp &= ~sel; I915_WRITE(PCH_DPLL_SEL, temp); } /* set transcoder timing, panel must allow it */ assert_panel_unlocked(dev_priv, pipe); I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); - intel_fdi_normal_train(crtc); + if (!IS_HASWELL(dev)) + intel_fdi_normal_train(crtc); /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | TRANS_DP_SYNC_MASK | TRANS_DP_BPC_MASK); temp |= (TRANS_DP_OUTPUT_ENABLE | TRANS_DP_ENH_FRAMING); temp |= bpc << 9; /* same format but at 11:9 */ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; switch (intel_trans_dp_port_sel(crtc)) { case PCH_DP_B: temp |= TRANS_DP_PORT_SEL_B; break; case PCH_DP_C: temp |= TRANS_DP_PORT_SEL_C; break; case PCH_DP_D: temp |= TRANS_DP_PORT_SEL_D; break; default: DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); temp |= TRANS_DP_PORT_SEL_B; break; } I915_WRITE(reg, temp); } intel_enable_transcoder(dev_priv, pipe); } +static void intel_put_pch_pll(struct intel_crtc *intel_crtc) +{ + struct intel_pch_pll *pll = intel_crtc->pch_pll; + + if (pll == NULL) + return; + + if (pll->refcount == 0) { + printf("bad PCH PLL refcount\n"); + return; + } + + --pll->refcount; + intel_crtc->pch_pll = NULL; +} + +static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp) +{ + struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + struct intel_pch_pll *pll; + int i; + + pll = intel_crtc->pch_pll; + if (pll) { + DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n", + intel_crtc->base.base.id, pll->pll_reg); + goto prepare; + } + + if (HAS_PCH_IBX(dev_priv->dev)) { + /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
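intel_get_pch_pll()/intel_put_pch_pll() implement a small sharing policy: keep the PLL the CRTC already holds, otherwise piggyback on a PLL that is already programmed with identical dpll/fp timings, otherwise claim an unused one and bump its refcount. Below is a simplified model of the last two steps over a plain array; the struct and function names are illustrative only, not the driver's.

#include <stddef.h>
#include <stdint.h>

struct pll {
	uint32_t dpll;		/* programmed divider word */
	uint32_t fp;		/* programmed fp0 value */
	int	 refcount;	/* CRTCs currently using this PLL */
};

/* Pick a PLL for the requested (dpll, fp); NULL when none is available. */
static struct pll *
get_shared_pll(struct pll *plls, size_t npll, uint32_t dpll, uint32_t fp)
{
	size_t i;

	/* Prefer sharing a PLL already running with identical timings. */
	for (i = 0; i < npll; i++)
		if (plls[i].refcount > 0 &&
		    plls[i].dpll == dpll && plls[i].fp == fp)
			goto found;
	/* Otherwise claim any unused PLL. */
	for (i = 0; i < npll; i++)
		if (plls[i].refcount == 0) {
			plls[i].dpll = dpll;
			plls[i].fp = fp;
			goto found;
		}
	return (NULL);
found:
	plls[i].refcount++;
	return (&plls[i]);
}

static void
put_shared_pll(struct pll *pll)
{
	if (pll != NULL && pll->refcount > 0)
		pll->refcount--;
}

int
main(void)
{
	struct pll plls[2] = { { 0, 0, 0 }, { 0, 0, 0 } };

	put_shared_pll(get_shared_pll(plls, 2, 0x1001, 0x2002));
	return (0);
}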
*/ + i = intel_crtc->pipe; + pll = &dev_priv->pch_plls[i]; + + DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n", + intel_crtc->base.base.id, pll->pll_reg); + + goto found; + } + + for (i = 0; i < dev_priv->num_pch_pll; i++) { + pll = &dev_priv->pch_plls[i]; + + /* Only want to check enabled timings first */ + if (pll->refcount == 0) + continue; + + if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) && + fp == I915_READ(pll->fp0_reg)) { + DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n", + intel_crtc->base.base.id, + pll->pll_reg, pll->refcount, pll->active); + + goto found; + } + } + + /* Ok no matching timings, maybe there's a free one? */ + for (i = 0; i < dev_priv->num_pch_pll; i++) { /* XXXKIB: HACK */ + pll = &dev_priv->pch_plls[i]; + if (pll->refcount == 0) { + DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n", + intel_crtc->base.base.id, pll->pll_reg); + goto found; + } + } + + return NULL; + +found: + intel_crtc->pch_pll = pll; + pll->refcount++; + DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe); +prepare: /* separate function? */ + DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg); + + /* Wait for the clocks to stabilize before rewriting the regs */ + I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(pll->pll_reg); + DELAY(150); + + I915_WRITE(pll->fp0_reg, fp); + I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); + pll->on = false; + return pll; +} + void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); u32 temp; temp = I915_READ(dslreg); DELAY(500); if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp1")) { /* Without this, mode sets may fail silently on FDI */ I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); DELAY(250); I915_WRITE(tc2reg, 0); if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp2")) DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); } } static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 temp; bool is_pch_port; if (intel_crtc->active) return; intel_crtc->active = true; intel_update_watermarks(dev); if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { temp = I915_READ(PCH_LVDS); if ((temp & LVDS_PORT_EN) == 0) I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); } is_pch_port = intel_crtc_driving_pch(crtc); if (is_pch_port) ironlake_fdi_pll_enable(crtc); else ironlake_fdi_disable(crtc); /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { /* Force use of hard-coded filter coefficients * as some pre-programmed values are broken, * e.g. x201. 
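For orientation, the enable path in ironlake_crtc_enable() performs its steps in a fixed order. The condensed outline below restates that order with no-op stubs standing in for the register sequences; it is a reading aid only, not driver code.

#include <stdbool.h>

/* No-op stand-ins so the outline compiles; each corresponds to a block of
 * register writes in ironlake_crtc_enable(). */
static void fdi_pll_enable(int p)	{ (void)p; }
static void fdi_disable(int p)		{ (void)p; }
static void panel_fitter_setup(int p)	{ (void)p; }
static void pipe_enable(int p)		{ (void)p; }
static void plane_enable(int p)		{ (void)p; }
static void pch_enable(int p)		{ (void)p; }
static void load_lut(int p)		{ (void)p; }
static void update_fbc(void)		{ }
static void cursor_update(int p)	{ (void)p; }

static void
ironlake_enable_outline(int pipe, int plane, bool is_pch_port)
{
	if (is_pch_port)
		fdi_pll_enable(pipe);	/* PCH receiver PLL warm-up first */
	else
		fdi_disable(pipe);
	panel_fitter_setup(pipe);	/* LVDS/eDP panel fitting, if any */
	pipe_enable(pipe);
	plane_enable(plane);
	if (is_pch_port)
		pch_enable(pipe);	/* FDI training, transcoder timings */
	load_lut(pipe);			/* gamma */
	update_fbc();
	cursor_update(pipe);
}

int
main(void)
{
	ironlake_enable_outline(0, 0, true);
	return (0);
}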
*/ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); } intel_enable_pipe(dev_priv, pipe, is_pch_port); intel_enable_plane(dev_priv, plane, pipe); if (is_pch_port) ironlake_pch_enable(crtc); intel_crtc_load_lut(crtc); DRM_LOCK(dev); intel_update_fbc(dev); DRM_UNLOCK(dev); intel_crtc_update_cursor(crtc, true); } static void ironlake_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 reg, temp; if (!intel_crtc->active) return; intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); intel_crtc_update_cursor(crtc, false); intel_disable_plane(dev_priv, plane, pipe); if (dev_priv->cfb_plane == plane) intel_disable_fbc(dev); intel_disable_pipe(dev_priv, pipe); /* Disable PF */ I915_WRITE(PF_CTL(pipe), 0); I915_WRITE(PF_WIN_SZ(pipe), 0); ironlake_fdi_disable(crtc); /* This is a horrible layering violation; we should be doing this in * the connector/encoder ->prepare instead, but we don't always have * enough information there about the config to know whether it will * actually be necessary or just cause undesired flicker. */ intel_disable_pch_ports(dev_priv, pipe); intel_disable_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); temp |= TRANS_DP_PORT_SEL_NONE; I915_WRITE(reg, temp); /* disable DPLL_SEL */ temp = I915_READ(PCH_DPLL_SEL); switch (pipe) { case 0: temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); break; case 1: temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); break; case 2: /* C shares PLL A or B */ temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); break; default: KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */ } I915_WRITE(PCH_DPLL_SEL, temp); } /* disable PCH DPLL */ - if (!intel_crtc->no_pll) - intel_disable_pch_pll(dev_priv, pipe); + intel_disable_pch_pll(intel_crtc); /* Switch from PCDclk to Rawclk */ reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); I915_WRITE(reg, temp & ~FDI_PCDCLK); /* Disable CPU FDI TX PLL */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); POSTING_READ(reg); DELAY(100); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); /* Wait for the clocks to turn off. */ POSTING_READ(reg); DELAY(100); intel_crtc->active = false; intel_update_watermarks(dev); DRM_LOCK(dev); intel_update_fbc(dev); - intel_clear_scanline_wait(dev); DRM_UNLOCK(dev); } static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
*/ switch (mode) { case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); ironlake_crtc_enable(crtc); break; case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); ironlake_crtc_disable(crtc); break; } } +static void ironlake_crtc_off(struct drm_crtc *crtc) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + intel_put_pch_pll(intel_crtc); +} + static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) { if (!enable && intel_crtc->overlay) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; DRM_LOCK(dev); dev_priv->mm.interruptible = false; (void) intel_overlay_switch_off(intel_crtc->overlay); dev_priv->mm.interruptible = true; DRM_UNLOCK(dev); } /* Let userspace switch the overlay on again. In most cases userspace * has to recompute where to put it anyway. */ } static void i9xx_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; if (intel_crtc->active) return; intel_crtc->active = true; intel_update_watermarks(dev); intel_enable_pll(dev_priv, pipe); intel_enable_pipe(dev_priv, pipe, false); intel_enable_plane(dev_priv, plane, pipe); intel_crtc_load_lut(crtc); intel_update_fbc(dev); /* Give the overlay scaler a chance to enable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, true); intel_crtc_update_cursor(crtc, true); } static void i9xx_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; if (!intel_crtc->active) return; /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); intel_crtc_dpms_overlay(intel_crtc, false); intel_crtc_update_cursor(crtc, false); if (dev_priv->cfb_plane == plane) intel_disable_fbc(dev); intel_disable_plane(dev_priv, plane, pipe); intel_disable_pipe(dev_priv, pipe); intel_disable_pll(dev_priv, pipe); intel_crtc->active = false; intel_update_fbc(dev); intel_update_watermarks(dev); - intel_clear_scanline_wait(dev); } static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) { /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. */ switch (mode) { case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: i9xx_crtc_enable(crtc); break; case DRM_MODE_DPMS_OFF: i9xx_crtc_disable(crtc); break; } } +static void i9xx_crtc_off(struct drm_crtc *crtc) +{ +} + /** * Sets the power management mode of the pipe and plane. 
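The new i9xx_crtc_off()/ironlake_crtc_off() hooks are reached through the per-platform display function table: intel_crtc_disable(), further below, forces DPMS off and then calls the platform's off() hook so PCH platforms can drop their shared PLL reference. A compressed model of that dispatch, with invented names and a trivial main():

#include <stdio.h>

struct crtc;				/* opaque for this sketch */

struct display_funcs {			/* only the members relevant here */
	void (*dpms)(struct crtc *, int);
	void (*off)(struct crtc *);
};

static void ilk_dpms(struct crtc *c, int mode) { (void)c; printf("dpms %d\n", mode); }
static void ilk_off(struct crtc *c)  { (void)c; printf("release shared PCH PLL\n"); }
static void i9xx_off(struct crtc *c) { (void)c; /* nothing to release */ }

static void
crtc_disable(const struct display_funcs *disp, struct crtc *c)
{
	disp->dpms(c, 3);	/* DRM_MODE_DPMS_OFF */
	disp->off(c);		/* platform-specific teardown */
}

int
main(void)
{
	struct display_funcs ilk = { ilk_dpms, ilk_off };
	struct display_funcs i9xx = { ilk_dpms, i9xx_off };

	crtc_disable(&ilk, NULL);
	crtc_disable(&i9xx, NULL);
	return (0);
}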
*/ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; #if 0 struct drm_i915_master_private *master_priv; #endif struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; bool enabled; if (intel_crtc->dpms_mode == mode) return; intel_crtc->dpms_mode = mode; dev_priv->display.dpms(crtc, mode); #if 0 if (!dev->primary->master) return; master_priv = dev->primary->master->driver_priv; if (!master_priv->sarea_priv) return; #else if (!dev_priv->sarea_priv) return; #endif enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; switch (pipe) { case 0: #if 0 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; #else dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0; dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0; #endif break; case 1: #if 0 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; #else dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0; dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0; #endif break; default: DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); break; } } static void intel_crtc_disable(struct drm_crtc *crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; - /* Flush any pending WAITs before we disable the pipe. Note that - * we need to drop the struct_mutex in order to acquire it again - * during the lowlevel dpms routines around a couple of the - * operations. It does not look trivial nor desirable to move - * that locking higher. So instead we leave a window for the - * submission of further commands on the fb before we can actually - * disable it. This race with userspace exists anyway, and we can - * only rely on the pipe being disabled by userspace after it - * receives the hotplug notification and has flushed any pending - * batches. - */ - if (crtc->fb) { - DRM_LOCK(dev); - intel_finish_fb(crtc->fb); - DRM_UNLOCK(dev); - } - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); - assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); + dev_priv->display.off(crtc); + + assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); if (crtc->fb) { DRM_LOCK(dev); intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); DRM_UNLOCK(dev); } } /* Prepare for a mode set. * * Note we could be a lot smarter here. We need to figure out which outputs * will be enabled, which disabled (in short, how the config will changes) * and perform the minimum necessary steps to accomplish that, e.g. updating * watermarks, FBC configuration, making sure PLLs are programmed correctly, * panel fitting is in the proper state, etc. 
*/ static void i9xx_crtc_prepare(struct drm_crtc *crtc) { i9xx_crtc_disable(crtc); } static void i9xx_crtc_commit(struct drm_crtc *crtc) { i9xx_crtc_enable(crtc); } static void ironlake_crtc_prepare(struct drm_crtc *crtc) { ironlake_crtc_disable(crtc); } static void ironlake_crtc_commit(struct drm_crtc *crtc) { ironlake_crtc_enable(crtc); } void intel_encoder_prepare(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; /* lvds has its own version of prepare see intel_lvds_prepare */ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); } void intel_encoder_commit(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; struct drm_device *dev = encoder->dev; - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); /* lvds has its own version of commit see intel_lvds_commit */ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); if (HAS_PCH_CPT(dev)) intel_cpt_verify_modeset(dev, intel_crtc->pipe); } void intel_encoder_destroy(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); drm_encoder_cleanup(encoder); free(intel_encoder, DRM_MEM_KMS); } static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) return false; } /* All interlaced capable intel hw wants timings in frames. Note though * that intel_lvds_mode_fixup does some funny tricks with the crtc * timings, so we need to be careful not to clobber these.*/ if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET)) drm_mode_set_crtcinfo(adjusted_mode, 0); return true; } +static int valleyview_get_display_clock_speed(struct drm_device *dev) +{ + return 400000; /* FIXME */ +} + static int i945_get_display_clock_speed(struct drm_device *dev) { return 400000; } static int i915_get_display_clock_speed(struct drm_device *dev) { return 333000; } static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) { return 200000; } static int i915gm_get_display_clock_speed(struct drm_device *dev) { u16 gcfgc = 0; gcfgc = pci_read_config(dev->device, GCFGC, 2); if (gcfgc & GC_LOW_FREQUENCY_ENABLE) return 133000; else { switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_333_MHZ: return 333000; default: case GC_DISPLAY_CLOCK_190_200_MHZ: return 190000; } } } static int i865_get_display_clock_speed(struct drm_device *dev) { return 266000; } static int i855_get_display_clock_speed(struct drm_device *dev) { u16 hpllcc = 0; /* Assume that the hardware is in the high speed state. This * should be the default. 
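The FDI data M/N computation in ironlake_compute_m_n(), defined a little further below, is plain ratio arithmetic: gmch_m = bpp * pixel_clock, gmch_n = link_clock * nlanes * 8, both shifted down until they fit in 24 bits. A worked example with assumed inputs (24 bpp, 4 lanes, a hypothetical 108000 kHz mode clock, 270000 kHz link clock):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the reduction loop used by fdi_reduce_ratio() below. */
static void
reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

int
main(void)
{
	/* All input values are illustrative, not taken from hardware. */
	uint32_t gmch_m = 24 * 108000;		/* 2592000 */
	uint32_t gmch_n = 270000 * 4 * 8;	/* 8640000 */

	reduce_ratio(&gmch_m, &gmch_n);		/* already below 0xffffff */
	printf("data M/N = %u/%u\n", gmch_m, gmch_n);
	return (0);
}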
*/ switch (hpllcc & GC_CLOCK_CONTROL_MASK) { case GC_CLOCK_133_200: case GC_CLOCK_100_200: return 200000; case GC_CLOCK_166_250: return 250000; case GC_CLOCK_100_133: return 133000; } /* Shouldn't happen */ return 0; } static int i830_get_display_clock_speed(struct drm_device *dev) { return 133000; } struct fdi_m_n { u32 tu; u32 gmch_m; u32 gmch_n; u32 link_m; u32 link_n; }; static void fdi_reduce_ratio(u32 *num, u32 *den) { while (*num > 0xffffff || *den > 0xffffff) { *num >>= 1; *den >>= 1; } } static void ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct fdi_m_n *m_n) { m_n->tu = 64; /* default size */ /* BUG_ON(pixel_clock > INT_MAX / 36); */ m_n->gmch_m = bits_per_pixel * pixel_clock; m_n->gmch_n = link_clock * nlanes * 8; fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); m_n->link_m = pixel_clock; m_n->link_n = link_clock; fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); } - -struct intel_watermark_params { - unsigned long fifo_size; - unsigned long max_wm; - unsigned long default_wm; - unsigned long guard_size; - unsigned long cacheline_size; -}; - -/* Pineview has different values for various configs */ -static const struct intel_watermark_params pineview_display_wm = { - PINEVIEW_DISPLAY_FIFO, - PINEVIEW_MAX_WM, - PINEVIEW_DFT_WM, - PINEVIEW_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params pineview_display_hplloff_wm = { - PINEVIEW_DISPLAY_FIFO, - PINEVIEW_MAX_WM, - PINEVIEW_DFT_HPLLOFF_WM, - PINEVIEW_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params pineview_cursor_wm = { - PINEVIEW_CURSOR_FIFO, - PINEVIEW_CURSOR_MAX_WM, - PINEVIEW_CURSOR_DFT_WM, - PINEVIEW_CURSOR_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params pineview_cursor_hplloff_wm = { - PINEVIEW_CURSOR_FIFO, - PINEVIEW_CURSOR_MAX_WM, - PINEVIEW_CURSOR_DFT_WM, - PINEVIEW_CURSOR_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params g4x_wm_info = { - G4X_FIFO_SIZE, - G4X_MAX_WM, - G4X_MAX_WM, - 2, - G4X_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params g4x_cursor_wm_info = { - I965_CURSOR_FIFO, - I965_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - G4X_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params i965_cursor_wm_info = { - I965_CURSOR_FIFO, - I965_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - I915_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params i945_wm_info = { - I945_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I915_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params i915_wm_info = { - I915_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I915_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params i855_wm_info = { - I855GM_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I830_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params i830_wm_info = { - I830_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I830_FIFO_LINE_SIZE -}; - -static const struct intel_watermark_params ironlake_display_wm_info = { - ILK_DISPLAY_FIFO, - ILK_DISPLAY_MAXWM, - ILK_DISPLAY_DFTWM, - 2, - ILK_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params ironlake_cursor_wm_info = { - ILK_CURSOR_FIFO, - ILK_CURSOR_MAXWM, - ILK_CURSOR_DFTWM, - 2, - ILK_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params ironlake_display_srwm_info = { - ILK_DISPLAY_SR_FIFO, - ILK_DISPLAY_MAX_SRWM, - ILK_DISPLAY_DFT_SRWM, - 2, - ILK_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params ironlake_cursor_srwm_info = { - ILK_CURSOR_SR_FIFO, - 
ILK_CURSOR_MAX_SRWM, - ILK_CURSOR_DFT_SRWM, - 2, - ILK_FIFO_LINE_SIZE -}; - -static const struct intel_watermark_params sandybridge_display_wm_info = { - SNB_DISPLAY_FIFO, - SNB_DISPLAY_MAXWM, - SNB_DISPLAY_DFTWM, - 2, - SNB_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params sandybridge_cursor_wm_info = { - SNB_CURSOR_FIFO, - SNB_CURSOR_MAXWM, - SNB_CURSOR_DFTWM, - 2, - SNB_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params sandybridge_display_srwm_info = { - SNB_DISPLAY_SR_FIFO, - SNB_DISPLAY_MAX_SRWM, - SNB_DISPLAY_DFT_SRWM, - 2, - SNB_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params sandybridge_cursor_srwm_info = { - SNB_CURSOR_SR_FIFO, - SNB_CURSOR_MAX_SRWM, - SNB_CURSOR_DFT_SRWM, - 2, - SNB_FIFO_LINE_SIZE -}; - - -/** - * intel_calculate_wm - calculate watermark level - * @clock_in_khz: pixel clock - * @wm: chip FIFO params - * @pixel_size: display pixel size - * @latency_ns: memory latency for the platform - * - * Calculate the watermark level (the level at which the display plane will - * start fetching from memory again). Each chip has a different display - * FIFO size and allocation, so the caller needs to figure that out and pass - * in the correct intel_watermark_params structure. - * - * As the pixel clock runs, the FIFO will be drained at a rate that depends - * on the pixel size. When it reaches the watermark level, it'll start - * fetching FIFO line sized based chunks from memory until the FIFO fills - * past the watermark point. If the FIFO drains completely, a FIFO underrun - * will occur, and a display engine hang could result. - */ -static unsigned long intel_calculate_wm(unsigned long clock_in_khz, - const struct intel_watermark_params *wm, - int fifo_size, - int pixel_size, - unsigned long latency_ns) -{ - long entries_required, wm_size; - - /* - * Note: we need to make sure we don't overflow for various clock & - * latency values. - * clocks go from a few thousand to several hundred thousand. - * latency is usually a few thousand - */ - entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / - 1000; - entries_required = howmany(entries_required, wm->cacheline_size); - - DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); - - wm_size = fifo_size - (entries_required + wm->guard_size); - - DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); - - /* Don't promote wm_size to unsigned... 
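Plugging assumed numbers into the watermark formula above makes the flow easier to follow. Only the 5000 ns latency below comes from this file (latency_ns); the clock, pixel size, FIFO size, guard and cacheline values are illustrative.

#include <stdio.h>

#define HOWMANY(x, y)	(((x) + (y) - 1) / (y))

int
main(void)
{
	long clock_khz = 100000, pixel_size = 4, latency_ns = 5000;
	long cacheline = 64, fifo_size = 96, guard = 2;
	long entries, wm;

	entries = ((clock_khz / 1000) * pixel_size * latency_ns) / 1000; /* 2000 */
	entries = HOWMANY(entries, cacheline);				  /* 32 */
	wm = fifo_size - (entries + guard);				  /* 62 */
	printf("watermark level = %ld\n", wm);
	return (0);
}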
*/ - if (wm_size > (long)wm->max_wm) - wm_size = wm->max_wm; - if (wm_size <= 0) - wm_size = wm->default_wm; - return wm_size; -} - -struct cxsr_latency { - int is_desktop; - int is_ddr3; - unsigned long fsb_freq; - unsigned long mem_freq; - unsigned long display_sr; - unsigned long display_hpll_disable; - unsigned long cursor_sr; - unsigned long cursor_hpll_disable; -}; - -static const struct cxsr_latency cxsr_latency_table[] = { - {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ - {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ - {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ - {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ - {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ - - {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ - {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ - {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ - {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ - {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ - - {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ - {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ - {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ - {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ - {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ - - {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ - {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ - {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ - {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ - {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ - - {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ - {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ - {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ - {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ - {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ - - {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ - {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ - {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ - {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ - {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ -}; - -static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, - int is_ddr3, - int fsb, - int mem) -{ - const struct cxsr_latency *latency; - int i; - - if (fsb == 0 || mem == 0) - return NULL; - - for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) { - latency = &cxsr_latency_table[i]; - if (is_desktop == latency->is_desktop && - is_ddr3 == latency->is_ddr3 && - fsb == latency->fsb_freq && mem == latency->mem_freq) - return latency; - } - - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - - return NULL; -} - -static void pineview_disable_cxsr(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* deactivate cxsr */ - I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); -} - -/* - * Latency for FIFO fetches is dependent on several factors: - * - memory configuration (speed, channels) - * - chipset - * - current MCH state - * It can be fairly high in some situations, so here we assume a fairly - * pessimal value. 
It's a tradeoff between extra memory fetches (if we - * set this value too high, the FIFO will fetch frequently to stay full) - * and power consumption (set it too low to save power and we might see - * FIFO underruns and display "flicker"). - * - * A value of 5us seems to be a good balance; safe for very low end - * platforms but not overly aggressive on lower latency configs. - */ -static const int latency_ns = 5000; - -static int i9xx_get_fifo_size(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dsparb = I915_READ(DSPARB); - int size; - - size = dsparb & 0x7f; - if (plane) - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; - - DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); - - return size; -} - -static int i85x_get_fifo_size(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dsparb = I915_READ(DSPARB); - int size; - - size = dsparb & 0x1ff; - if (plane) - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; - size >>= 1; /* Convert to cachelines */ - - DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); - - return size; -} - -static int i845_get_fifo_size(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dsparb = I915_READ(DSPARB); - int size; - - size = dsparb & 0x7f; - size >>= 2; /* Convert to cachelines */ - - DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", - size); - - return size; -} - -static int i830_get_fifo_size(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dsparb = I915_READ(DSPARB); - int size; - - size = dsparb & 0x7f; - size >>= 1; /* Convert to cachelines */ - - DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? 
"B" : "A", size); - - return size; -} - -static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) -{ - struct drm_crtc *crtc, *enabled = NULL; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (crtc->enabled && crtc->fb) { - if (enabled) - return NULL; - enabled = crtc; - } - } - - return enabled; -} - -static void pineview_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - const struct cxsr_latency *latency; - u32 reg; - unsigned long wm; - - latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, - dev_priv->fsb_freq, dev_priv->mem_freq); - if (!latency) { - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - pineview_disable_cxsr(dev); - return; - } - - crtc = single_enabled_crtc(dev); - if (crtc) { - int clock = crtc->mode.clock; - int pixel_size = crtc->fb->bits_per_pixel / 8; - - /* Display SR */ - wm = intel_calculate_wm(clock, &pineview_display_wm, - pineview_display_wm.fifo_size, - pixel_size, latency->display_sr); - reg = I915_READ(DSPFW1); - reg &= ~DSPFW_SR_MASK; - reg |= wm << DSPFW_SR_SHIFT; - I915_WRITE(DSPFW1, reg); - DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); - - /* cursor SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_wm, - pineview_display_wm.fifo_size, - pixel_size, latency->cursor_sr); - reg = I915_READ(DSPFW3); - reg &= ~DSPFW_CURSOR_SR_MASK; - reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; - I915_WRITE(DSPFW3, reg); - - /* Display HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, - pineview_display_hplloff_wm.fifo_size, - pixel_size, latency->display_hpll_disable); - reg = I915_READ(DSPFW3); - reg &= ~DSPFW_HPLL_SR_MASK; - reg |= wm & DSPFW_HPLL_SR_MASK; - I915_WRITE(DSPFW3, reg); - - /* cursor HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, - pineview_display_hplloff_wm.fifo_size, - pixel_size, latency->cursor_hpll_disable); - reg = I915_READ(DSPFW3); - reg &= ~DSPFW_HPLL_CURSOR_MASK; - reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; - I915_WRITE(DSPFW3, reg); - DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); - - /* activate cxsr */ - I915_WRITE(DSPFW3, - I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); - DRM_DEBUG_KMS("Self-refresh is enabled\n"); - } else { - pineview_disable_cxsr(dev); - DRM_DEBUG_KMS("Self-refresh is disabled\n"); - } -} - -static bool g4x_compute_wm0(struct drm_device *dev, - int plane, - const struct intel_watermark_params *display, - int display_latency_ns, - const struct intel_watermark_params *cursor, - int cursor_latency_ns, - int *plane_wm, - int *cursor_wm) -{ - struct drm_crtc *crtc; - int htotal, hdisplay, clock, pixel_size; - int line_time_us, line_count; - int entries, tlb_miss; - - crtc = intel_get_crtc_for_plane(dev, plane); - if (crtc->fb == NULL || !crtc->enabled) { - *cursor_wm = cursor->guard_size; - *plane_wm = display->guard_size; - return false; - } - - htotal = crtc->mode.htotal; - hdisplay = crtc->mode.hdisplay; - clock = crtc->mode.clock; - pixel_size = crtc->fb->bits_per_pixel / 8; - - /* Use the small buffer method to calculate plane watermark */ - entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; - tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; - if (tlb_miss > 0) - entries += tlb_miss; - entries = howmany(entries, display->cacheline_size); - *plane_wm = entries + display->guard_size; - if (*plane_wm > (int)display->max_wm) - *plane_wm = display->max_wm; - - /* Use the large buffer method to 
calculate cursor watermark */ - line_time_us = ((htotal * 1000) / clock); - line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; - entries = line_count * 64 * pixel_size; - tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; - if (tlb_miss > 0) - entries += tlb_miss; - entries = howmany(entries, cursor->cacheline_size); - *cursor_wm = entries + cursor->guard_size; - if (*cursor_wm > (int)cursor->max_wm) - *cursor_wm = (int)cursor->max_wm; - - return true; -} - -/* - * Check the wm result. - * - * If any calculated watermark values is larger than the maximum value that - * can be programmed into the associated watermark register, that watermark - * must be disabled. - */ -static bool g4x_check_srwm(struct drm_device *dev, - int display_wm, int cursor_wm, - const struct intel_watermark_params *display, - const struct intel_watermark_params *cursor) -{ - DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", - display_wm, cursor_wm); - - if (display_wm > display->max_wm) { - DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", - display_wm, display->max_wm); - return false; - } - - if (cursor_wm > cursor->max_wm) { - DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", - cursor_wm, cursor->max_wm); - return false; - } - - if (!(display_wm || cursor_wm)) { - DRM_DEBUG_KMS("SR latency is 0, disabling\n"); - return false; - } - - return true; -} - -static bool g4x_compute_srwm(struct drm_device *dev, - int plane, - int latency_ns, - const struct intel_watermark_params *display, - const struct intel_watermark_params *cursor, - int *display_wm, int *cursor_wm) -{ - struct drm_crtc *crtc; - int hdisplay, htotal, pixel_size, clock; - unsigned long line_time_us; - int line_count, line_size; - int small, large; - int entries; - - if (!latency_ns) { - *display_wm = *cursor_wm = 0; - return false; - } - - crtc = intel_get_crtc_for_plane(dev, plane); - hdisplay = crtc->mode.hdisplay; - htotal = crtc->mode.htotal; - clock = crtc->mode.clock; - pixel_size = crtc->fb->bits_per_pixel / 8; - - line_time_us = (htotal * 1000) / clock; - line_count = (latency_ns / line_time_us + 1000) / 1000; - line_size = hdisplay * pixel_size; - - /* Use the minimum of the small and large buffer method for primary */ - small = ((clock * pixel_size / 1000) * latency_ns) / 1000; - large = line_count * line_size; - - entries = howmany(min(small, large), display->cacheline_size); - *display_wm = entries + display->guard_size; - - /* calculate the self-refresh watermark for display cursor */ - entries = line_count * pixel_size * 64; - entries = howmany(entries, cursor->cacheline_size); - *cursor_wm = entries + cursor->guard_size; - - return g4x_check_srwm(dev, - *display_wm, *cursor_wm, - display, cursor); -} - -#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask)) - -static void g4x_update_wm(struct drm_device *dev) -{ - static const int sr_latency_ns = 12000; - struct drm_i915_private *dev_priv = dev->dev_private; - int planea_wm, planeb_wm, cursora_wm, cursorb_wm; - int plane_sr, cursor_sr; - unsigned int enabled = 0; - - if (g4x_compute_wm0(dev, 0, - &g4x_wm_info, latency_ns, - &g4x_cursor_wm_info, latency_ns, - &planea_wm, &cursora_wm)) - enabled |= 1; - - if (g4x_compute_wm0(dev, 1, - &g4x_wm_info, latency_ns, - &g4x_cursor_wm_info, latency_ns, - &planeb_wm, &cursorb_wm)) - enabled |= 2; - - plane_sr = cursor_sr = 0; - if (single_plane_enabled(enabled) && - g4x_compute_srwm(dev, ffs(enabled) - 1, - sr_latency_ns, - &g4x_wm_info, - &g4x_cursor_wm_info, 
- &plane_sr, &cursor_sr)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - else - I915_WRITE(FW_BLC_SELF, - I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", - planea_wm, cursora_wm, - planeb_wm, cursorb_wm, - plane_sr, cursor_sr); - - I915_WRITE(DSPFW1, - (plane_sr << DSPFW_SR_SHIFT) | - (cursorb_wm << DSPFW_CURSORB_SHIFT) | - (planeb_wm << DSPFW_PLANEB_SHIFT) | - planea_wm); - I915_WRITE(DSPFW2, - (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | - (cursora_wm << DSPFW_CURSORA_SHIFT)); - /* HPLL off in SR has some issues on G4x... disable it */ - I915_WRITE(DSPFW3, - (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | - (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); -} - -static void i965_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - int srwm = 1; - int cursor_sr = 16; - - /* Calc sr entries for one plane configs */ - crtc = single_enabled_crtc(dev); - if (crtc) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 12000; - int clock = crtc->mode.clock; - int htotal = crtc->mode.htotal; - int hdisplay = crtc->mode.hdisplay; - int pixel_size = crtc->fb->bits_per_pixel / 8; - unsigned long line_time_us; - int entries; - - line_time_us = ((htotal * 1000) / clock); - - /* Use ns/us then divide to preserve precision */ - entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * hdisplay; - entries = howmany(entries, I915_FIFO_LINE_SIZE); - srwm = I965_FIFO_SIZE - entries; - if (srwm < 0) - srwm = 1; - srwm &= 0x1ff; - DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", - entries, srwm); - - entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * 64; - entries = howmany(entries, i965_cursor_wm_info.cacheline_size); - cursor_sr = i965_cursor_wm_info.fifo_size - - (entries + i965_cursor_wm_info.guard_size); - - if (cursor_sr > i965_cursor_wm_info.max_wm) - cursor_sr = i965_cursor_wm_info.max_wm; - - DRM_DEBUG_KMS("self-refresh watermark: display plane %d " - "cursor %d\n", srwm, cursor_sr); - - if (IS_CRESTLINE(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - if (IS_CRESTLINE(dev)) - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); - } - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", - srwm); - - /* 965 has limitations... 
*/ - I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | - (8 << 16) | (8 << 8) | (8 << 0)); - I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); - /* update cursor SR watermark */ - I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); -} - -static void i9xx_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - const struct intel_watermark_params *wm_info; - uint32_t fwater_lo; - uint32_t fwater_hi; - int cwm, srwm = 1; - int fifo_size; - int planea_wm, planeb_wm; - struct drm_crtc *crtc, *enabled = NULL; - - if (IS_I945GM(dev)) - wm_info = &i945_wm_info; - else if (!IS_GEN2(dev)) - wm_info = &i915_wm_info; - else - wm_info = &i855_wm_info; - - fifo_size = dev_priv->display.get_fifo_size(dev, 0); - crtc = intel_get_crtc_for_plane(dev, 0); - if (crtc->enabled && crtc->fb) { - planea_wm = intel_calculate_wm(crtc->mode.clock, - wm_info, fifo_size, - crtc->fb->bits_per_pixel / 8, - latency_ns); - enabled = crtc; - } else - planea_wm = fifo_size - wm_info->guard_size; - - fifo_size = dev_priv->display.get_fifo_size(dev, 1); - crtc = intel_get_crtc_for_plane(dev, 1); - if (crtc->enabled && crtc->fb) { - planeb_wm = intel_calculate_wm(crtc->mode.clock, - wm_info, fifo_size, - crtc->fb->bits_per_pixel / 8, - latency_ns); - if (enabled == NULL) - enabled = crtc; - else - enabled = NULL; - } else - planeb_wm = fifo_size - wm_info->guard_size; - - DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); - - /* - * Overlay gets an aggressive default since video jitter is bad. - */ - cwm = 2; - - /* Play safe and disable self-refresh before adjusting watermarks. */ - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); - else if (IS_I915GM(dev)) - I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); - - /* Calc sr entries for one plane configs */ - if (HAS_FW_BLC(dev) && enabled) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 6000; - int clock = enabled->mode.clock; - int htotal = enabled->mode.htotal; - int hdisplay = enabled->mode.hdisplay; - int pixel_size = enabled->fb->bits_per_pixel / 8; - unsigned long line_time_us; - int entries; - - line_time_us = (htotal * 1000) / clock; - - /* Use ns/us then divide to preserve precision */ - entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * hdisplay; - entries = howmany(entries, wm_info->cacheline_size); - DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); - srwm = wm_info->fifo_size - entries; - if (srwm < 0) - srwm = 1; - - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, - FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); - else if (IS_I915GM(dev)) - I915_WRITE(FW_BLC_SELF, srwm & 0x3f); - } - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", - planea_wm, planeb_wm, cwm, srwm); - - fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); - fwater_hi = (cwm & 0x1f); - - /* Set request length to 8 cachelines per fetch */ - fwater_lo = fwater_lo | (1 << 24) | (1 << 8); - fwater_hi = fwater_hi | (1 << 8); - - I915_WRITE(FW_BLC, fwater_lo); - I915_WRITE(FW_BLC2, fwater_hi); - - if (HAS_FW_BLC(dev)) { - if (enabled) { - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, - FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); - else if (IS_I915GM(dev)) - I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); - DRM_DEBUG_KMS("memory self refresh enabled\n"); - } else - DRM_DEBUG_KMS("memory self refresh disabled\n"); - } -} - -static void i830_update_wm(struct drm_device *dev) -{ - 
struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - uint32_t fwater_lo; - int planea_wm; - - crtc = single_enabled_crtc(dev); - if (crtc == NULL) - return; - - planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, - dev_priv->display.get_fifo_size(dev, 0), - crtc->fb->bits_per_pixel / 8, - latency_ns); - fwater_lo = I915_READ(FW_BLC) & ~0xfff; - fwater_lo |= (3<<8) | planea_wm; - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); - - I915_WRITE(FW_BLC, fwater_lo); -} - -#define ILK_LP0_PLANE_LATENCY 700 -#define ILK_LP0_CURSOR_LATENCY 1300 - -/* - * Check the wm result. - * - * If any calculated watermark values is larger than the maximum value that - * can be programmed into the associated watermark register, that watermark - * must be disabled. - */ -static bool ironlake_check_srwm(struct drm_device *dev, int level, - int fbc_wm, int display_wm, int cursor_wm, - const struct intel_watermark_params *display, - const struct intel_watermark_params *cursor) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," - " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); - - if (fbc_wm > SNB_FBC_MAX_SRWM) { - DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", - fbc_wm, SNB_FBC_MAX_SRWM, level); - - /* fbc has it's own way to disable FBC WM */ - I915_WRITE(DISP_ARB_CTL, - I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); - return false; - } - - if (display_wm > display->max_wm) { - DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", - display_wm, SNB_DISPLAY_MAX_SRWM, level); - return false; - } - - if (cursor_wm > cursor->max_wm) { - DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", - cursor_wm, SNB_CURSOR_MAX_SRWM, level); - return false; - } - - if (!(fbc_wm || display_wm || cursor_wm)) { - DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); - return false; - } - - return true; -} - -/* - * Compute watermark values of WM[1-3], - */ -static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, - int latency_ns, - const struct intel_watermark_params *display, - const struct intel_watermark_params *cursor, - int *fbc_wm, int *display_wm, int *cursor_wm) -{ - struct drm_crtc *crtc; - unsigned long line_time_us; - int hdisplay, htotal, pixel_size, clock; - int line_count, line_size; - int small, large; - int entries; - - if (!latency_ns) { - *fbc_wm = *display_wm = *cursor_wm = 0; - return false; - } - - crtc = intel_get_crtc_for_plane(dev, plane); - hdisplay = crtc->mode.hdisplay; - htotal = crtc->mode.htotal; - clock = crtc->mode.clock; - pixel_size = crtc->fb->bits_per_pixel / 8; - - line_time_us = (htotal * 1000) / clock; - line_count = (latency_ns / line_time_us + 1000) / 1000; - line_size = hdisplay * pixel_size; - - /* Use the minimum of the small and large buffer method for primary */ - small = ((clock * pixel_size / 1000) * latency_ns) / 1000; - large = line_count * line_size; - - entries = howmany(min(small, large), display->cacheline_size); - *display_wm = entries + display->guard_size; - - /* - * Spec says: - * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 - */ - *fbc_wm = howmany(*display_wm * 64, line_size) + 2; - - /* calculate the self-refresh watermark for display cursor */ - entries = line_count * pixel_size * 64; - entries = howmany(entries, cursor->cacheline_size); - *cursor_wm = entries + cursor->guard_size; - - return ironlake_check_srwm(dev, 
level, - *fbc_wm, *display_wm, *cursor_wm, - display, cursor); -} - -static void ironlake_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int fbc_wm, plane_wm, cursor_wm; - unsigned int enabled; - - enabled = 0; - if (g4x_compute_wm0(dev, 0, - &ironlake_display_wm_info, - ILK_LP0_PLANE_LATENCY, - &ironlake_cursor_wm_info, - ILK_LP0_CURSOR_LATENCY, - &plane_wm, &cursor_wm)) { - I915_WRITE(WM0_PIPEA_ILK, - (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); - DRM_DEBUG_KMS("FIFO watermarks For pipe A -" - " plane %d, " "cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 1; - } - - if (g4x_compute_wm0(dev, 1, - &ironlake_display_wm_info, - ILK_LP0_PLANE_LATENCY, - &ironlake_cursor_wm_info, - ILK_LP0_CURSOR_LATENCY, - &plane_wm, &cursor_wm)) { - I915_WRITE(WM0_PIPEB_ILK, - (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); - DRM_DEBUG_KMS("FIFO watermarks For pipe B -" - " plane %d, cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 2; - } - - /* - * Calculate and update the self-refresh watermark only when one - * display plane is used. - */ - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - if (!single_plane_enabled(enabled)) - return; - enabled = ffs(enabled) - 1; - - /* WM1 */ - if (!ironlake_compute_srwm(dev, 1, enabled, - ILK_READ_WM1_LATENCY() * 500, - &ironlake_display_srwm_info, - &ironlake_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM1_LP_ILK, - WM1_LP_SR_EN | - (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); - - /* WM2 */ - if (!ironlake_compute_srwm(dev, 2, enabled, - ILK_READ_WM2_LATENCY() * 500, - &ironlake_display_srwm_info, - &ironlake_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM2_LP_ILK, - WM2_LP_EN | - (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); - - /* - * WM3 is unsupported on ILK, probably because we don't have latency - * data for that power state - */ -} - -void sandybridge_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ - u32 val; - int fbc_wm, plane_wm, cursor_wm; - unsigned int enabled; - - enabled = 0; - if (g4x_compute_wm0(dev, 0, - &sandybridge_display_wm_info, latency, - &sandybridge_cursor_wm_info, latency, - &plane_wm, &cursor_wm)) { - val = I915_READ(WM0_PIPEA_ILK); - val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEA_ILK, val | - ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); - DRM_DEBUG_KMS("FIFO watermarks For pipe A -" - " plane %d, " "cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 1; - } - - if (g4x_compute_wm0(dev, 1, - &sandybridge_display_wm_info, latency, - &sandybridge_cursor_wm_info, latency, - &plane_wm, &cursor_wm)) { - val = I915_READ(WM0_PIPEB_ILK); - val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEB_ILK, val | - ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); - DRM_DEBUG_KMS("FIFO watermarks For pipe B -" - " plane %d, cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 2; - } - - /* IVB has 3 pipes */ - if (IS_IVYBRIDGE(dev) && - g4x_compute_wm0(dev, 2, - &sandybridge_display_wm_info, latency, - &sandybridge_cursor_wm_info, latency, - &plane_wm, &cursor_wm)) { - val = I915_READ(WM0_PIPEC_IVB); - val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - 
I915_WRITE(WM0_PIPEC_IVB, val | - ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); - DRM_DEBUG_KMS("FIFO watermarks For pipe C -" - " plane %d, cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 3; - } - - /* - * Calculate and update the self-refresh watermark only when one - * display plane is used. - * - * SNB support 3 levels of watermark. - * - * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, - * and disabled in the descending order - * - */ - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - if (!single_plane_enabled(enabled) || - dev_priv->sprite_scaling_enabled) - return; - enabled = ffs(enabled) - 1; - - /* WM1 */ - if (!ironlake_compute_srwm(dev, 1, enabled, - SNB_READ_WM1_LATENCY() * 500, - &sandybridge_display_srwm_info, - &sandybridge_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM1_LP_ILK, - WM1_LP_SR_EN | - (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); - - /* WM2 */ - if (!ironlake_compute_srwm(dev, 2, enabled, - SNB_READ_WM2_LATENCY() * 500, - &sandybridge_display_srwm_info, - &sandybridge_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM2_LP_ILK, - WM2_LP_EN | - (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); - - /* WM3 */ - if (!ironlake_compute_srwm(dev, 3, enabled, - SNB_READ_WM3_LATENCY() * 500, - &sandybridge_display_srwm_info, - &sandybridge_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM3_LP_ILK, - WM3_LP_EN | - (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); -} - -static bool -sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, - uint32_t sprite_width, int pixel_size, - const struct intel_watermark_params *display, - int display_latency_ns, int *sprite_wm) -{ - struct drm_crtc *crtc; - int clock; - int entries, tlb_miss; - - crtc = intel_get_crtc_for_plane(dev, plane); - if (crtc->fb == NULL || !crtc->enabled) { - *sprite_wm = display->guard_size; - return false; - } - - clock = crtc->mode.clock; - - /* Use the small buffer method to calculate the sprite watermark */ - entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; - tlb_miss = display->fifo_size*display->cacheline_size - - sprite_width * 8; - if (tlb_miss > 0) - entries += tlb_miss; - entries = howmany(entries, display->cacheline_size); - *sprite_wm = entries + display->guard_size; - if (*sprite_wm > (int)display->max_wm) - *sprite_wm = display->max_wm; - - return true; -} - -static bool -sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, - uint32_t sprite_width, int pixel_size, - const struct intel_watermark_params *display, - int latency_ns, int *sprite_wm) -{ - struct drm_crtc *crtc; - unsigned long line_time_us; - int clock; - int line_count, line_size; - int small, large; - int entries; - - if (!latency_ns) { - *sprite_wm = 0; - return false; - } - - crtc = intel_get_crtc_for_plane(dev, plane); - clock = crtc->mode.clock; - if (!clock) { - *sprite_wm = 0; - return false; - } - - line_time_us = (sprite_width * 1000) / clock; - if (!line_time_us) { - *sprite_wm = 0; - return false; - } - - line_count = (latency_ns / line_time_us + 1000) / 1000; - line_size = sprite_width * pixel_size; - - /* Use the minimum of the small and large buffer method for primary */ 
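The small/large buffer selection used here is easiest to see with numbers. The fragment below reruns the same integer math for an assumed 100000 kHz clock, 4-byte pixels, 6000 ns latency and a 256-pixel sprite; all of the values are illustrative.

#include <stdio.h>

#define HOWMANY(x, y)	(((x) + (y) - 1) / (y))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	int clock = 100000, pixel_size = 4, latency_ns = 6000;
	int sprite_width = 256, cacheline = 64, guard = 2;
	int line_time_us, line_count, line_size, small, large, entries;

	line_time_us = (sprite_width * 1000) / clock;		/* 2 */
	line_count = (latency_ns / line_time_us + 1000) / 1000;	/* 4 */
	line_size = sprite_width * pixel_size;			/* 1024 */

	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;	/* 2400 */
	large = line_count * line_size;					/* 4096 */
	entries = HOWMANY(MIN(small, large), cacheline);		/* 38 */
	printf("sprite SR watermark = %d\n", entries + guard);		/* 40 */
	return (0);
}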
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000; - large = line_count * line_size; - - entries = howmany(min(small, large), display->cacheline_size); - *sprite_wm = entries + display->guard_size; - - return *sprite_wm > 0x3ff ? false : true; -} - -static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, - uint32_t sprite_width, int pixel_size) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ - u32 val; - int sprite_wm, reg; - int ret; - - switch (pipe) { - case 0: - reg = WM0_PIPEA_ILK; - break; - case 1: - reg = WM0_PIPEB_ILK; - break; - case 2: - reg = WM0_PIPEC_IVB; - break; - default: - return; /* bad pipe */ - } - - ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, - &sandybridge_display_wm_info, - latency, &sprite_wm); - if (!ret) { - DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", - pipe); - return; - } - - val = I915_READ(reg); - val &= ~WM0_PIPE_SPRITE_MASK; - I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); - DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); - - - ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, - pixel_size, - &sandybridge_display_srwm_info, - SNB_READ_WM1_LATENCY() * 500, - &sprite_wm); - if (!ret) { - DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", - pipe); - return; - } - I915_WRITE(WM1S_LP_ILK, sprite_wm); - - /* Only IVB has two more LP watermarks for sprite */ - if (!IS_IVYBRIDGE(dev)) - return; - - ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, - pixel_size, - &sandybridge_display_srwm_info, - SNB_READ_WM2_LATENCY() * 500, - &sprite_wm); - if (!ret) { - DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", - pipe); - return; - } - I915_WRITE(WM2S_LP_IVB, sprite_wm); - - ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, - pixel_size, - &sandybridge_display_srwm_info, - SNB_READ_WM3_LATENCY() * 500, - &sprite_wm); - if (!ret) { - DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", - pipe); - return; - } - I915_WRITE(WM3S_LP_IVB, sprite_wm); -} - -/** - * intel_update_watermarks - update FIFO watermark values based on current modes - * - * Calculate watermark values for the various WM regs based on current mode - * and plane configuration. - * - * There are several cases to deal with here: - * - normal (i.e. non-self-refresh) - * - self-refresh (SR) mode - * - lines are large relative to FIFO size (buffer can hold up to 2) - * - lines are small relative to FIFO size (buffer can hold more than 2 - * lines), so need to account for TLB latency - * - * The normal calculation is: - * watermark = dotclock * bytes per pixel * latency - * where latency is platform & configuration dependent (we assume pessimal - * values here). - * - * The SR calculation is: - * watermark = (trunc(latency/line time)+1) * surface width * - * bytes per pixel - * where - * line time = htotal / dotclock - * surface width = hdisplay for normal plane and 64 for cursor - * and latency is assumed to be high, as above. - * - * The final value programmed to the register should always be rounded up, - * and include an extra 2 entries to account for clock crossings. - * - * We don't use the sprite, so we can ignore that. And on Crestline we have - * to set the non-SR watermarks to 8. 
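[Editorial aside] The self-refresh formula quoted in the comment above is easy to sanity-check with concrete numbers. Below is a hedged, userspace-only worked example; the mode and latency values are made up, and line time is kept in nanoseconds so the integer division does not truncate to zero.

    /*
     * Worked example of the self-refresh (SR) watermark formula from the
     * comment above.  All numbers are illustrative assumptions.
     */
    #include <stdio.h>

    int main(void)
    {
        long long dotclock_khz = 148500;   /* 1920x1080@60 */
        long long htotal = 2200;
        long long hdisplay = 1920;
        long long bytes_per_pixel = 4;
        long long latency_ns = 12000;      /* assumed SR latency */

        /* line time = htotal / dotclock, expressed in nanoseconds */
        long long line_time_ns = (htotal * 1000000LL) / dotclock_khz;
        /* (trunc(latency / line time) + 1) * surface width * bpp */
        long long wm_bytes = (latency_ns / line_time_ns + 1) * hdisplay *
            bytes_per_pixel;

        printf("line time %lld ns, SR watermark %lld bytes\n",
            line_time_ns, wm_bytes);
        return 0;
    }

For this mode a scanline takes about 14.8 us, so a 12 us latency still fits inside one line and the SR watermark is a single line's worth of bytes.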
- */ -static void intel_update_watermarks(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->display.update_wm) - dev_priv->display.update_wm(dev); -} - -void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, - uint32_t sprite_width, int pixel_size) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->display.update_sprite_wm) - dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, - pixel_size); -} - static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { if (i915_panel_use_ssc >= 0) return i915_panel_use_ssc != 0; return dev_priv->lvds_use_ssc && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } /** * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send * @crtc: CRTC structure * @mode: requested mode * * A pipe may be connected to one or more outputs. Based on the depth of the * attached framebuffer, choose a good color depth to use on the pipe. * * If possible, match the pipe depth to the fb depth. In some cases, this * isn't ideal, because the connected output supports a lesser or restricted * set of depths. Resolve that here: * LVDS typically supports only 6bpc, so clamp down in that case * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc * Displays may support a restricted set as well, check EDID and clamp as * appropriate. * DP may want to dither down to 6bpc to fit larger modes * * RETURNS: * Dithering requirement (i.e. false if display bpc and pipe bpc match, * true if they don't match). */ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, unsigned int *pipe_bpp, struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_encoder *encoder; struct drm_connector *connector; unsigned int display_bpc = UINT_MAX, bpc; /* Walk the encoders & connectors on this crtc, get min bpc */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); if (encoder->crtc != crtc) continue; if (intel_encoder->type == INTEL_OUTPUT_LVDS) { unsigned int lvds_bpc; if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) lvds_bpc = 8; else lvds_bpc = 6; if (lvds_bpc < display_bpc) { DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); display_bpc = lvds_bpc; } continue; } if (intel_encoder->type == INTEL_OUTPUT_EDP) { /* Use VBT settings if we have an eDP panel */ unsigned int edp_bpc = dev_priv->edp.bpp / 3; if (edp_bpc < display_bpc) { DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); display_bpc = edp_bpc; } continue; } /* Not one of the known troublemakers, check the EDID */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder != encoder) continue; /* Don't use an invalid EDID bpc value */ if (connector->display_info.bpc && connector->display_info.bpc < display_bpc) { DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); display_bpc = connector->display_info.bpc; } } /* * HDMI is either 12 or 8, so if the display lets 10bpc sneak * through, clamp it down. (Note: >12bpc will be caught below.) 
*/ if (intel_encoder->type == INTEL_OUTPUT_HDMI) { if (display_bpc > 8 && display_bpc < 12) { DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); display_bpc = 12; } else { DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); display_bpc = 8; } } } if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { DRM_DEBUG_KMS("Dithering DP to 6bpc\n"); display_bpc = 6; } /* * We could just drive the pipe at the highest bpc all the time and * enable dithering as needed, but that costs bandwidth. So choose * the minimum value that expresses the full color range of the fb but * also stays within the max display bpc discovered above. */ switch (crtc->fb->depth) { case 8: bpc = 8; /* since we go through a colormap */ break; case 15: case 16: bpc = 6; /* min is 18bpp */ break; case 24: bpc = 8; break; case 30: bpc = 10; break; case 48: bpc = 12; break; default: DRM_DEBUG("unsupported depth, assuming 24 bits\n"); bpc = min((unsigned int)8, display_bpc); break; } display_bpc = min(display_bpc, bpc); DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", bpc, display_bpc); *pipe_bpp = display_bpc * 3; return display_bpc != bpc; } static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; int refclk; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { refclk = dev_priv->lvds_ssc_freq * 1000; DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", refclk / 1000); } else if (!IS_GEN2(dev)) { refclk = 96000; } else { refclk = 48000; } return refclk; } static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode, intel_clock_t *clock) { /* SDVO TV has fixed PLL values depend on its clock range, this mirrors vbios setting. 
*/ if (adjusted_mode->clock >= 100000 && adjusted_mode->clock < 140500) { clock->p1 = 2; clock->p2 = 10; clock->n = 3; clock->m1 = 16; clock->m2 = 8; } else if (adjusted_mode->clock >= 140500 && adjusted_mode->clock <= 200000) { clock->p1 = 1; clock->p2 = 10; clock->n = 6; clock->m1 = 12; clock->m2 = 8; } } static void i9xx_update_pll_dividers(struct drm_crtc *crtc, intel_clock_t *clock, intel_clock_t *reduced_clock) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 fp, fp2 = 0; if (IS_PINEVIEW(dev)) { fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2; if (reduced_clock) fp2 = (1 << reduced_clock->n) << 16 | reduced_clock->m1 << 8 | reduced_clock->m2; } else { fp = clock->n << 16 | clock->m1 << 8 | clock->m2; if (reduced_clock) fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 | reduced_clock->m2; } I915_WRITE(FP0(pipe), fp); intel_crtc->lowfreq_avail = false; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && reduced_clock && i915_powersave) { I915_WRITE(FP1(pipe), fp2); intel_crtc->lowfreq_avail = true; } else { I915_WRITE(FP1(pipe), fp); } } +static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 temp; + + temp = I915_READ(LVDS); + temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; + if (pipe == 1) { + temp |= LVDS_PIPEB_SELECT; + } else { + temp &= ~LVDS_PIPEB_SELECT; + } + /* set the corresponsding LVDS_BORDER bit */ + temp |= dev_priv->lvds_border_bits; + /* Set the B0-B3 data pairs corresponding to whether we're going to + * set the DPLLs for dual-channel mode or not. + */ + if (clock->p2 == 7) + temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; + else + temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); + + /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) + * appropriately here, but we need to look more thoroughly into how + * panels behave in the two modes. 
+ */ + /* set the dithering flag on LVDS as needed */ + if (INTEL_INFO(dev)->gen >= 4) { + if (dev_priv->lvds_dither) + temp |= LVDS_ENABLE_DITHER; + else + temp &= ~LVDS_ENABLE_DITHER; + } + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) + temp |= LVDS_HSYNC_POLARITY; + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) + temp |= LVDS_VSYNC_POLARITY; + I915_WRITE(LVDS, temp); +} + +static void i9xx_update_pll(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + intel_clock_t *clock, intel_clock_t *reduced_clock, + int num_connectors) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 dpll; + bool is_sdvo; + + is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); + + dpll = DPLL_VGA_MODE_DIS; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + dpll |= DPLLB_MODE_LVDS; + else + dpll |= DPLLB_MODE_DAC_SERIAL; + if (is_sdvo) { + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + if (pixel_multiplier > 1) { + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; + } + dpll |= DPLL_DVO_HIGH_SPEED; + } + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + dpll |= DPLL_DVO_HIGH_SPEED; + + /* compute bitmask from p1 value */ + if (IS_PINEVIEW(dev)) + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; + else { + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + if (IS_G4X(dev) && reduced_clock) + dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + } + switch (clock->p2) { + case 5: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; + break; + case 7: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; + break; + case 10: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; + break; + case 14: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; + } + if (INTEL_INFO(dev)->gen >= 4) + dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); + + if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) + dpll |= PLL_REF_INPUT_TVCLKINBC; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; + + dpll |= DPLL_VCO_ENABLE; + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(DPLL(pipe)); + DELAY(150); + + /* The LVDS pin pair needs to be on before the DPLLs are enabled. + * This is an exception to the general rule that mode_set doesn't turn + * things on. + */ + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + intel_update_lvds(crtc, clock, adjusted_mode); + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + intel_dp_set_m_n(crtc, mode, adjusted_mode); + + I915_WRITE(DPLL(pipe), dpll); + + /* Wait for the clocks to stabilize. 
*/ + POSTING_READ(DPLL(pipe)); + DELAY(150); + + if (INTEL_INFO(dev)->gen >= 4) { + u32 temp = 0; + if (is_sdvo) { + temp = intel_mode_get_pixel_multiplier(adjusted_mode); + if (temp > 1) + temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + else + temp = 0; + } + I915_WRITE(DPLL_MD(pipe), temp); + } else { + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(DPLL(pipe), dpll); + } +} + +static void i8xx_update_pll(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode, + intel_clock_t *clock, + int num_connectors) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 dpll; + + dpll = DPLL_VGA_MODE_DIS; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + } else { + if (clock->p1 == 2) + dpll |= PLL_P1_DIVIDE_BY_TWO; + else + dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; + if (clock->p2 == 4) + dpll |= PLL_P2_DIVIDE_BY_4; + } + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; + + dpll |= DPLL_VCO_ENABLE; + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(DPLL(pipe)); + DELAY(150); + + I915_WRITE(DPLL(pipe), dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(DPLL(pipe)); + DELAY(150); + + /* The LVDS pin pair needs to be on before the DPLLs are enabled. + * This is an exception to the general rule that mode_set doesn't turn + * things on. + */ + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + intel_update_lvds(crtc, clock, adjusted_mode); + + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. 
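[Editorial aside] Both PLL update paths above encode the P1 post divider as a one-hot field (bit p1-1) shifted into place, with P2 selected from a small set of fixed values. A standalone sketch of the P1 encoding follows; the shift constant is a placeholder for the real register definition.

    /*
     * Sketch of the one-hot P1 post-divider encoding used by the PLL
     * update helpers above.  P1_POST_DIV_SHIFT is a placeholder.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define P1_POST_DIV_SHIFT  16  /* placeholder shift */

    static uint32_t encode_p1(int p1)
    {
        /* divider value N becomes a one-hot bit at position N - 1 */
        return (uint32_t)(1u << (p1 - 1)) << P1_POST_DIV_SHIFT;
    }

    int main(void)
    {
        for (int p1 = 1; p1 <= 8; p1++)
            printf("p1=%d -> 0x%08x\n", p1, encode_p1(p1));
        return 0;
    }

The one-hot form is why readback code (later in this file) can recover P1 with ffs() rather than a divide.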
+ */ + I915_WRITE(DPLL(pipe), dpll); +} + static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; - u32 dpll, dspcntr, pipeconf, vsyncshift; - bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; - bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + u32 dspcntr, pipeconf, vsyncshift; + bool ok, has_reduced_clock = false, is_sdvo = false; + bool is_lvds = false, is_tv = false, is_dp = false; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; const intel_limit_t *limit; int ret; - u32 temp; - u32 lvds_sync = 0; list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { if (encoder->base.crtc != crtc) continue; switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_SDVO: case INTEL_OUTPUT_HDMI: is_sdvo = true; if (encoder->needs_tv_clock) is_tv = true; break; - case INTEL_OUTPUT_DVO: - is_dvo = true; - break; case INTEL_OUTPUT_TVOUT: is_tv = true; break; - case INTEL_OUTPUT_ANALOG: - is_crt = true; - break; case INTEL_OUTPUT_DISPLAYPORT: is_dp = true; break; } num_connectors++; } refclk = i9xx_get_refclk(crtc, num_connectors); /* * Returns a set of divisors for the desired target clock with the given * refclk, or false. The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ limit = intel_limit(crtc, refclk); ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; } /* Ensure that the cursor is valid for the new mode before changing... */ intel_crtc_update_cursor(crtc, true); if (is_lvds && dev_priv->lvds_downclock_avail) { /* * Ensure we match the reduced clock's P to the target clock. * If the clocks don't match, we can't switch the display clock * by using the FP0/FP1. In such case we will disable the LVDS * downclock feature. */ has_reduced_clock = limit->find_pll(limit, crtc, dev_priv->lvds_downclock, refclk, &clock, &reduced_clock); } if (is_sdvo && is_tv) i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? 
&reduced_clock : NULL); - dpll = DPLL_VGA_MODE_DIS; - - if (!IS_GEN2(dev)) { - if (is_lvds) - dpll |= DPLLB_MODE_LVDS; - else - dpll |= DPLLB_MODE_DAC_SERIAL; - if (is_sdvo) { - int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); - if (pixel_multiplier > 1) { - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; - } - dpll |= DPLL_DVO_HIGH_SPEED; - } - if (is_dp) - dpll |= DPLL_DVO_HIGH_SPEED; - - /* compute bitmask from p1 value */ - if (IS_PINEVIEW(dev)) - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; - else { - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; - if (IS_G4X(dev) && has_reduced_clock) - dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; - } - switch (clock.p2) { - case 5: - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; - break; - case 7: - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; - break; - case 10: - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; - break; - case 14: - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; - break; - } - if (INTEL_INFO(dev)->gen >= 4) - dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); - } else { - if (is_lvds) { - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; - } else { - if (clock.p1 == 2) - dpll |= PLL_P1_DIVIDE_BY_TWO; - else - dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; - if (clock.p2 == 4) - dpll |= PLL_P2_DIVIDE_BY_4; - } - } - - if (is_sdvo && is_tv) - dpll |= PLL_REF_INPUT_TVCLKINBC; - else if (is_tv) - /* XXX: just matching BIOS for now */ - /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ - dpll |= 3; - else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) - dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + if (IS_GEN2(dev)) + i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); else - dpll |= PLL_REF_INPUT_DREFCLK; + i9xx_update_pll(crtc, mode, adjusted_mode, &clock, + has_reduced_clock ? &reduced_clock : NULL, + num_connectors); /* setup pipeconf */ pipeconf = I915_READ(PIPECONF(pipe)); /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; if (pipe == 0) dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; else dspcntr |= DISPPLANE_SEL_PIPE_B; if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { /* Enable pixel doubling when the dot clock is > 90% of the (display) * core speed. * * XXX: No double-wide on 915GM pipe B. Is that the only reason for the * pipe == 0 check? */ if (mode->clock > dev_priv->display.get_display_clock_speed(dev) * 9 / 10) pipeconf |= PIPECONF_DOUBLE_WIDE; else pipeconf &= ~PIPECONF_DOUBLE_WIDE; } /* default to 8bpc */ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); if (is_dp) { if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { pipeconf |= PIPECONF_BPP_6 | PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; } } - dpll |= DPLL_VCO_ENABLE; - DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); drm_mode_debug_printmodeline(mode); - I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); - - POSTING_READ(DPLL(pipe)); - DELAY(150); - - /* The LVDS pin pair needs to be on before the DPLLs are enabled. - * This is an exception to the general rule that mode_set doesn't turn - * things on. - */ - if (is_lvds) { - temp = I915_READ(LVDS); - temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; - if (pipe == 1) { - temp |= LVDS_PIPEB_SELECT; - } else { - temp &= ~LVDS_PIPEB_SELECT; - } - /* set the corresponsding LVDS_BORDER bit */ - temp |= dev_priv->lvds_border_bits; - /* Set the B0-B3 data pairs corresponding to whether we're going to - * set the DPLLs for dual-channel mode or not. 
- */ - if (clock.p2 == 7) - temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; - else - temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); - - /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) - * appropriately here, but we need to look more thoroughly into how - * panels behave in the two modes. - */ - /* set the dithering flag on LVDS as needed */ - if (INTEL_INFO(dev)->gen >= 4) { - if (dev_priv->lvds_dither) - temp |= LVDS_ENABLE_DITHER; - else - temp &= ~LVDS_ENABLE_DITHER; - } - if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) - lvds_sync |= LVDS_HSYNC_POLARITY; - if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) - lvds_sync |= LVDS_VSYNC_POLARITY; - if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) - != lvds_sync) { - char flags[2] = "-+"; - DRM_INFO("Changing LVDS panel from " - "(%chsync, %cvsync) to (%chsync, %cvsync)\n", - flags[!(temp & LVDS_HSYNC_POLARITY)], - flags[!(temp & LVDS_VSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); - temp |= lvds_sync; - } - I915_WRITE(LVDS, temp); - } - - if (is_dp) { - intel_dp_set_m_n(crtc, mode, adjusted_mode); - } - - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(DPLL(pipe)); - DELAY(150); - - if (INTEL_INFO(dev)->gen >= 4) { - temp = 0; - if (is_sdvo) { - temp = intel_mode_get_pixel_multiplier(adjusted_mode); - if (temp > 1) - temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; - else - temp = 0; - } - I915_WRITE(DPLL_MD(pipe), temp); - } else { - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(DPLL(pipe), dpll); - } - if (HAS_PIPE_CXSR(dev)) { if (intel_crtc->lowfreq_avail) { DRM_DEBUG_KMS("enabling CxSR downclocking\n"); pipeconf |= PIPECONF_CXSR_DOWNCLOCK; } else { DRM_DEBUG_KMS("disabling CxSR downclocking\n"); pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; } } pipeconf &= ~PIPECONF_INTERLACE_MASK; if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; /* the chip adds 2 halflines automatically */ adjusted_mode->crtc_vtotal -= 1; adjusted_mode->crtc_vblank_end -= 1; vsyncshift = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_htotal/2; } else { pipeconf |= PIPECONF_PROGRESSIVE; vsyncshift = 0; } if (!IS_GEN3(dev)) I915_WRITE(VSYNCSHIFT(pipe), vsyncshift); I915_WRITE(HTOTAL(pipe), (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); I915_WRITE(HBLANK(pipe), (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); I915_WRITE(HSYNC(pipe), (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); I915_WRITE(VTOTAL(pipe), (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); I915_WRITE(VBLANK(pipe), (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); I915_WRITE(VSYNC(pipe), (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. 
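[Editorial aside] The timing writes above all follow one pattern: the active/start value minus one in the low 16 bits and the total/end value minus one in the high half. A tiny sketch of that packing, with assumed 1080p numbers:

    /*
     * Sketch of the (start - 1) | ((end - 1) << 16) packing used for the
     * HTOTAL/HBLANK/HSYNC/VTOTAL/VBLANK/VSYNC writes above.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_timing(unsigned int lo, unsigned int hi)
    {
        return (lo - 1) | ((hi - 1) << 16);
    }

    int main(void)
    {
        uint32_t htotal_reg = pack_timing(1920, 2200);  /* hdisplay, htotal */

        printf("HTOTAL = 0x%08x\n", htotal_reg);
        return 0;
    }

The minus-one convention is what forces the matching plus-one when the registers are decoded back into a mode in intel_crtc_mode_get() later in this file.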
*/ I915_WRITE(DSPSIZE(plane), ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); I915_WRITE(DSPPOS(plane), 0); I915_WRITE(PIPESRC(pipe), ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); I915_WRITE(PIPECONF(pipe), pipeconf); POSTING_READ(PIPECONF(pipe)); intel_enable_pipe(dev_priv, pipe, false); intel_wait_for_vblank(dev, pipe); I915_WRITE(DSPCNTR(plane), dspcntr); POSTING_READ(DSPCNTR(plane)); - intel_enable_plane(dev_priv, plane, pipe); ret = intel_pipe_set_base(crtc, x, y, old_fb); intel_update_watermarks(dev); return ret; } /* * Initialize reference clocks when the driver loads */ void ironlake_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; u32 temp; bool has_lvds = false; bool has_cpu_edp = false; bool has_pch_edp = false; bool has_panel = false; bool has_ck505 = false; bool can_ssc = false; /* We need to take the global config into account */ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { switch (encoder->type) { case INTEL_OUTPUT_LVDS: has_panel = true; has_lvds = true; break; case INTEL_OUTPUT_EDP: has_panel = true; if (intel_encoder_is_pch_edp(&encoder->base)) has_pch_edp = true; else has_cpu_edp = true; break; } } if (HAS_PCH_IBX(dev)) { has_ck505 = dev_priv->display_clock_mode; can_ssc = has_ck505; } else { has_ck505 = false; can_ssc = true; } DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", has_panel, has_lvds, has_pch_edp, has_cpu_edp, has_ck505); /* Ironlake: try to setup display ref clock before DPLL * enabling. This is only under driver's control after * PCH B stepping, previous chipset stepping should be * ignoring this setting. */ temp = I915_READ(PCH_DREF_CONTROL); /* Always enable nonspread source */ temp &= ~DREF_NONSPREAD_SOURCE_MASK; if (has_ck505) temp |= DREF_NONSPREAD_CK505_ENABLE; else temp |= DREF_NONSPREAD_SOURCE_ENABLE; if (has_panel) { temp &= ~DREF_SSC_SOURCE_MASK; temp |= DREF_SSC_SOURCE_ENABLE; /* SSC must be turned on before enabling the CPU output */ if (intel_panel_use_ssc(dev_priv) && can_ssc) { DRM_DEBUG_KMS("Using SSC on panel\n"); temp |= DREF_SSC1_ENABLE; } else temp &= ~DREF_SSC1_ENABLE; /* Get SSC going before enabling the outputs */ I915_WRITE(PCH_DREF_CONTROL, temp); POSTING_READ(PCH_DREF_CONTROL); DELAY(200); temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; /* Enable CPU source on CPU attached eDP */ if (has_cpu_edp) { if (intel_panel_use_ssc(dev_priv) && can_ssc) { DRM_DEBUG_KMS("Using SSC on eDP\n"); temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; } else temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; I915_WRITE(PCH_DREF_CONTROL, temp); POSTING_READ(PCH_DREF_CONTROL); DELAY(200); } else { DRM_DEBUG_KMS("Disabling SSC entirely\n"); temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; /* Turn off CPU output */ temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; I915_WRITE(PCH_DREF_CONTROL, temp); POSTING_READ(PCH_DREF_CONTROL); DELAY(200); /* Turn off the SSC source */ temp &= ~DREF_SSC_SOURCE_MASK; temp |= DREF_SSC_SOURCE_DISABLE; /* Turn off SSC1 */ temp &= ~ DREF_SSC1_ENABLE; I915_WRITE(PCH_DREF_CONTROL, temp); POSTING_READ(PCH_DREF_CONTROL); DELAY(200); } } static int ironlake_get_refclk(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *edp_encoder = NULL; int 
num_connectors = 0; bool is_lvds = false; list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { if (encoder->base.crtc != crtc) continue; switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_EDP: edp_encoder = encoder; break; } num_connectors++; } if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", dev_priv->lvds_ssc_freq); return dev_priv->lvds_ssc_freq * 1000; } return 120000; } static int ironlake_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; bool ok, has_reduced_clock = false, is_sdvo = false; bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; - struct intel_encoder *has_edp_encoder = NULL; struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_encoder *encoder; + struct intel_encoder *encoder, *edp_encoder = NULL; const intel_limit_t *limit; int ret; struct fdi_m_n m_n = {0}; u32 temp; - u32 lvds_sync = 0; int target_clock, pixel_multiplier, lane, link_bw, factor; unsigned int pipe_bpp; bool dither; + bool is_cpu_edp = false, is_pch_edp = false; list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { if (encoder->base.crtc != crtc) continue; switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_SDVO: case INTEL_OUTPUT_HDMI: is_sdvo = true; if (encoder->needs_tv_clock) is_tv = true; break; case INTEL_OUTPUT_TVOUT: is_tv = true; break; case INTEL_OUTPUT_ANALOG: is_crt = true; break; case INTEL_OUTPUT_DISPLAYPORT: is_dp = true; break; case INTEL_OUTPUT_EDP: - has_edp_encoder = encoder; + is_dp = true; + if (intel_encoder_is_pch_edp(&encoder->base)) + is_pch_edp = true; + else + is_cpu_edp = true; + edp_encoder = encoder; break; } num_connectors++; } refclk = ironlake_get_refclk(crtc); /* * Returns a set of divisors for the desired target clock with the given * refclk, or false. The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ limit = intel_limit(crtc, refclk); ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; } /* Ensure that the cursor is valid for the new mode before changing... */ intel_crtc_update_cursor(crtc, true); if (is_lvds && dev_priv->lvds_downclock_avail) { /* * Ensure we match the reduced clock's P to the target clock. * If the clocks don't match, we can't switch the display clock * by using the FP0/FP1. In such case we will disable the LVDS * downclock feature. */ has_reduced_clock = limit->find_pll(limit, crtc, dev_priv->lvds_downclock, refclk, &clock, &reduced_clock); } /* SDVO TV has fixed PLL values depend on its clock range, this mirrors vbios setting. 
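[Editorial aside] The clock equation quoted in the comment above is worth writing out once with numbers: the dot clock produced by a divider set is refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2. The divider values in the sketch below are plausible assumptions chosen only to exercise the formula.

    /*
     * Worked example of the PLL clock equation quoted above.  The divider
     * values are illustrative assumptions.
     */
    #include <stdio.h>

    static long pll_dot_khz(long refclk_khz, int m1, int m2, int n, int p1,
        int p2)
    {
        long m = 5 * (m1 + 2) + (m2 + 2);

        return refclk_khz * m / (n + 2) / p1 / p2;
    }

    int main(void)
    {
        /* 120 MHz reference and an arbitrary but plausible divider set */
        printf("dot clock = %ld kHz\n", pll_dot_khz(120000, 12, 9, 3, 2, 5));
        return 0;
    }

With those dividers the VCO runs near 1.94 GHz and the post dividers bring the output down to roughly 194 MHz, which is the kind of dot clock find_pll is searching for.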
*/ if (is_sdvo && is_tv) { if (adjusted_mode->clock >= 100000 && adjusted_mode->clock < 140500) { clock.p1 = 2; clock.p2 = 10; clock.n = 3; clock.m1 = 16; clock.m2 = 8; } else if (adjusted_mode->clock >= 140500 && adjusted_mode->clock <= 200000) { clock.p1 = 1; clock.p2 = 10; clock.n = 6; clock.m1 = 12; clock.m2 = 8; } } /* FDI link */ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); lane = 0; /* CPU eDP doesn't require FDI link, so just set DP M/N according to current link config */ - if (has_edp_encoder && - !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (is_cpu_edp) { target_clock = mode->clock; - intel_edp_link_config(has_edp_encoder, - &lane, &link_bw); + intel_edp_link_config(edp_encoder, &lane, &link_bw); } else { /* [e]DP over FDI requires target mode clock instead of link clock */ - if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) + if (is_dp) target_clock = mode->clock; else target_clock = adjusted_mode->clock; /* FDI is a binary signal running at ~2.7GHz, encoding * each output octet as 10 bits. The actual frequency * is stored as a divider into a 100MHz clock, and the * mode pixel clock is stored in units of 1KHz. * Hence the bw of each lane in terms of the mode signal * is: */ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; } /* determine panel color depth */ temp = I915_READ(PIPECONF(pipe)); temp &= ~PIPE_BPC_MASK; dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode); switch (pipe_bpp) { case 18: temp |= PIPE_6BPC; break; case 24: temp |= PIPE_8BPC; break; case 30: temp |= PIPE_10BPC; break; case 36: temp |= PIPE_12BPC; break; default: printf("intel_choose_pipe_bpp returned invalid value %d\n", pipe_bpp); temp |= PIPE_8BPC; pipe_bpp = 24; break; } intel_crtc->bpp = pipe_bpp; I915_WRITE(PIPECONF(pipe), temp); if (!lane) { /* * Account for spread spectrum to avoid * oversubscribing the link. Max center spread * is 2.5%; use 5% for safety's sake. 
*/ u32 bps = target_clock * intel_crtc->bpp * 21 / 20; lane = bps / (link_bw * 8) + 1; } intel_crtc->fdi_lanes = lane; if (pixel_multiplier > 1) link_bw *= pixel_multiplier; ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n); fp = clock.n << 16 | clock.m1 << 8 | clock.m2; if (has_reduced_clock) fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | reduced_clock.m2; /* Enable autotuning of the PLL clock (if permissible) */ factor = 21; if (is_lvds) { if ((intel_panel_use_ssc(dev_priv) && dev_priv->lvds_ssc_freq == 100) || (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) factor = 25; } else if (is_sdvo && is_tv) factor = 20; if (clock.m < factor * clock.n) fp |= FP_CB_TUNE; dpll = 0; if (is_lvds) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; if (is_sdvo) { int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); if (pixel_multiplier > 1) { dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } dpll |= DPLL_DVO_HIGH_SPEED; } - if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) + if (is_dp && !is_cpu_edp) dpll |= DPLL_DVO_HIGH_SPEED; /* compute bitmask from p1 value */ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; /* also FPA1 */ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; switch (clock.p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; break; case 7: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; break; case 10: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; break; case 14: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } if (is_sdvo && is_tv) dpll |= PLL_REF_INPUT_TVCLKINBC; else if (is_tv) /* XXX: just matching BIOS for now */ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ dpll |= 3; else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; /* setup pipeconf */ pipeconf = I915_READ(PIPECONF(pipe)); /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; - DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); drm_mode_debug_printmodeline(mode); - /* PCH eDP needs FDI, but CPU eDP does not */ - if (!intel_crtc->no_pll) { - if (!has_edp_encoder || - intel_encoder_is_pch_edp(&has_edp_encoder->base)) { - I915_WRITE(PCH_FP0(pipe), fp); - I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); + /* CPU eDP is the only output that doesn't need a PCH PLL of its own on + * pre-Haswell/LPT generation */ + if (HAS_PCH_LPT(dev)) { + DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n", + pipe); + } else if (!is_cpu_edp) { + struct intel_pch_pll *pll; - POSTING_READ(PCH_DPLL(pipe)); - DELAY(150); - } - } else { - if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) && - fp == I915_READ(PCH_FP0(0))) { - intel_crtc->use_pll_a = true; - DRM_DEBUG_KMS("using pipe a dpll\n"); - } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) && - fp == I915_READ(PCH_FP0(1))) { - intel_crtc->use_pll_a = false; - DRM_DEBUG_KMS("using pipe b dpll\n"); - } else { - DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n"); - return -EINVAL; - } - } + pll = intel_get_pch_pll(intel_crtc, dpll, fp); + if (pll == NULL) { + DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", + pipe); + return -EINVAL; + } + } else + intel_put_pch_pll(intel_crtc); /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. 
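[Editorial aside] The FDI lane computation above budgets 5% extra bandwidth for spread spectrum (the 21/20 factor) and then rounds up to the next whole lane. A standalone sketch with assumed numbers follows; link_bw is expressed in the same kilo-octets-per-second-per-lane units the driver derives from the 2.7 GHz, 8b/10b-encoded link.

    /*
     * Sketch of the FDI lane-count calculation above.  The example numbers
     * (1080p at 24bpp over an assumed 2.7 Gb/s FDI link) are illustrative.
     */
    #include <stdio.h>

    int main(void)
    {
        long target_clock_khz = 148500;  /* mode dot clock */
        long bpp = 24;                   /* pipe bits per pixel */
        long link_bw = 270000;           /* k-octets/s per lane: 2.7 Gb/s / 10 */

        /* required bandwidth in kbit/s, with 5% spread-spectrum margin */
        long bps = target_clock_khz * bpp * 21 / 20;
        /* each lane carries link_bw * 8 kbit/s of payload */
        long lanes = bps / (link_bw * 8) + 1;

        printf("need %ld kbit/s -> %ld FDI lane(s)\n", bps, lanes);
        return 0;
    }

A 1080p stream at 24bpp needs just under 3.75 Gbit/s with the margin applied, which lands on two FDI lanes.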
*/ if (is_lvds) { temp = I915_READ(PCH_LVDS); temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; if (HAS_PCH_CPT(dev)) { temp &= ~PORT_TRANS_SEL_MASK; temp |= PORT_TRANS_SEL_CPT(pipe); } else { if (pipe == 1) temp |= LVDS_PIPEB_SELECT; else temp &= ~LVDS_PIPEB_SELECT; } /* set the corresponsding LVDS_BORDER bit */ temp |= dev_priv->lvds_border_bits; /* Set the B0-B3 data pairs corresponding to whether we're going to * set the DPLLs for dual-channel mode or not. */ if (clock.p2 == 7) temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; else temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) * appropriately here, but we need to look more thoroughly into how * panels behave in the two modes. */ + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) - lvds_sync |= LVDS_HSYNC_POLARITY; + temp |= LVDS_HSYNC_POLARITY; if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) - lvds_sync |= LVDS_VSYNC_POLARITY; - if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) - != lvds_sync) { - char flags[2] = "-+"; - DRM_INFO("Changing LVDS panel from " - "(%chsync, %cvsync) to (%chsync, %cvsync)\n", - flags[!(temp & LVDS_HSYNC_POLARITY)], - flags[!(temp & LVDS_VSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); - temp |= lvds_sync; - } + temp |= LVDS_VSYNC_POLARITY; I915_WRITE(PCH_LVDS, temp); } pipeconf &= ~PIPECONF_DITHER_EN; pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; if ((is_lvds && dev_priv->lvds_dither) || dither) { pipeconf |= PIPECONF_DITHER_EN; pipeconf |= PIPECONF_DITHER_TYPE_SP; } - if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (is_dp && !is_cpu_edp) { intel_dp_set_m_n(crtc, mode, adjusted_mode); } else { /* For non-DP output, clear any trans DP clock recovery setting.*/ I915_WRITE(TRANSDATA_M1(pipe), 0); I915_WRITE(TRANSDATA_N1(pipe), 0); I915_WRITE(TRANSDPLINK_M1(pipe), 0); I915_WRITE(TRANSDPLINK_N1(pipe), 0); } - if (!intel_crtc->no_pll && - (!has_edp_encoder || - intel_encoder_is_pch_edp(&has_edp_encoder->base))) { - I915_WRITE(PCH_DPLL(pipe), dpll); + if (intel_crtc->pch_pll) { + I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); /* Wait for the clocks to stabilize. */ - POSTING_READ(PCH_DPLL(pipe)); + POSTING_READ(intel_crtc->pch_pll->pll_reg); DELAY(150); /* The pixel multiplier can only be updated once the * DPLL is enabled and the clocks are stable. * * So write it again. 
*/ - I915_WRITE(PCH_DPLL(pipe), dpll); + I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); } intel_crtc->lowfreq_avail = false; - if (!intel_crtc->no_pll) { + if (intel_crtc->pch_pll) { if (is_lvds && has_reduced_clock && i915_powersave) { - I915_WRITE(PCH_FP1(pipe), fp2); + I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); intel_crtc->lowfreq_avail = true; if (HAS_PIPE_CXSR(dev)) { DRM_DEBUG_KMS("enabling CxSR downclocking\n"); pipeconf |= PIPECONF_CXSR_DOWNCLOCK; } } else { - I915_WRITE(PCH_FP1(pipe), fp); + I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); if (HAS_PIPE_CXSR(dev)) { DRM_DEBUG_KMS("disabling CxSR downclocking\n"); pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; } } } pipeconf &= ~PIPECONF_INTERLACE_MASK; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { pipeconf |= PIPECONF_INTERLACED_ILK; /* the chip adds 2 halflines automatically */ adjusted_mode->crtc_vtotal -= 1; adjusted_mode->crtc_vblank_end -= 1; I915_WRITE(VSYNCSHIFT(pipe), adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_htotal/2); } else { pipeconf |= PIPECONF_PROGRESSIVE; I915_WRITE(VSYNCSHIFT(pipe), 0); } I915_WRITE(HTOTAL(pipe), (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); I915_WRITE(HBLANK(pipe), (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); I915_WRITE(HSYNC(pipe), (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); I915_WRITE(VTOTAL(pipe), (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); I915_WRITE(VBLANK(pipe), (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); I915_WRITE(VSYNC(pipe), (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); /* pipesrc controls the size that is scaled from, which should * always be the user's requested size. 
*/ I915_WRITE(PIPESRC(pipe), ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); - if (has_edp_encoder && - !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (is_cpu_edp) ironlake_set_pll_edp(crtc, adjusted_mode->clock); - } I915_WRITE(PIPECONF(pipe), pipeconf); POSTING_READ(PIPECONF(pipe)); intel_wait_for_vblank(dev, pipe); I915_WRITE(DSPCNTR(plane), dspcntr); POSTING_READ(DSPCNTR(plane)); ret = intel_pipe_set_base(crtc, x, y, old_fb); intel_update_watermarks(dev); + intel_update_linetime_watermarks(dev, pipe, adjusted_mode); + return ret; } static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int ret; drm_vblank_pre_modeset(dev, pipe); ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, x, y, old_fb); drm_vblank_post_modeset(dev, pipe); if (ret) intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; else intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; return ret; } static bool intel_eld_uptodate(struct drm_connector *connector, int reg_eldv, uint32_t bits_eldv, int reg_elda, uint32_t bits_elda, int reg_edid) { struct drm_i915_private *dev_priv = connector->dev->dev_private; uint8_t *eld = connector->eld; uint32_t i; i = I915_READ(reg_eldv); i &= bits_eldv; if (!eld[0]) return !i; if (!i) return false; i = I915_READ(reg_elda); i &= ~bits_elda; I915_WRITE(reg_elda, i); for (i = 0; i < eld[2]; i++) if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) return false; return true; } static void g4x_write_eld(struct drm_connector *connector, struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = connector->dev->dev_private; uint8_t *eld = connector->eld; uint32_t eldv; uint32_t len; uint32_t i; i = I915_READ(G4X_AUD_VID_DID); if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL) eldv = G4X_ELDV_DEVCL_DEVBLC; else eldv = G4X_ELDV_DEVCTG; if (intel_eld_uptodate(connector, G4X_AUD_CNTL_ST, eldv, G4X_AUD_CNTL_ST, G4X_ELD_ADDR, G4X_HDMIW_HDMIEDID)) return; i = I915_READ(G4X_AUD_CNTL_ST); i &= ~(eldv | G4X_ELD_ADDR); len = (i >> 9) & 0x1f; /* ELD buffer size */ I915_WRITE(G4X_AUD_CNTL_ST, i); if (!eld[0]) return; if (eld[2] < (uint8_t)len) len = eld[2]; DRM_DEBUG_KMS("ELD size %d\n", len); for (i = 0; i < len; i++) I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); i = I915_READ(G4X_AUD_CNTL_ST); i |= eldv; I915_WRITE(G4X_AUD_CNTL_ST, i); } static void ironlake_write_eld(struct drm_connector *connector, struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = connector->dev->dev_private; uint8_t *eld = connector->eld; uint32_t eldv; uint32_t i; int len; int hdmiw_hdmiedid; int aud_config; int aud_cntl_st; int aud_cntrl_st2; if (HAS_PCH_IBX(connector->dev)) { hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; aud_config = IBX_AUD_CONFIG_A; aud_cntl_st = IBX_AUD_CNTL_ST_A; aud_cntrl_st2 = IBX_AUD_CNTL_ST2; } else { hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; aud_config = CPT_AUD_CONFIG_A; aud_cntl_st = CPT_AUD_CNTL_ST_A; aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; } i = to_intel_crtc(crtc)->pipe; hdmiw_hdmiedid += i * 0x100; aud_cntl_st += i * 0x100; aud_config += i * 0x100; DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i)); i = 
I915_READ(aud_cntl_st); i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */ if (!i) { DRM_DEBUG_KMS("Audio directed to unknown port\n"); /* operate blindly on all ports */ eldv = IBX_ELD_VALIDB; eldv |= IBX_ELD_VALIDB << 4; eldv |= IBX_ELD_VALIDB << 8; } else { DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i); eldv = IBX_ELD_VALIDB << ((i - 1) * 4); } if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ } else I915_WRITE(aud_config, 0); if (intel_eld_uptodate(connector, aud_cntrl_st2, eldv, aud_cntl_st, IBX_ELD_ADDRESS, hdmiw_hdmiedid)) return; i = I915_READ(aud_cntrl_st2); i &= ~eldv; I915_WRITE(aud_cntrl_st2, i); if (!eld[0]) return; i = I915_READ(aud_cntl_st); i &= ~IBX_ELD_ADDRESS; I915_WRITE(aud_cntl_st, i); /* 84 bytes of hw ELD buffer */ len = 21; if (eld[2] < (uint8_t)len) len = eld[2]; DRM_DEBUG_KMS("ELD size %d\n", len); for (i = 0; i < len; i++) I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); i = I915_READ(aud_cntrl_st2); i |= eldv; I915_WRITE(aud_cntrl_st2, i); } void intel_write_eld(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_crtc *crtc = encoder->crtc; struct drm_connector *connector; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; connector = drm_select_eld(encoder, mode); if (!connector) return; DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, drm_get_connector_name(connector), connector->encoder->base.id, drm_get_encoder_name(connector->encoder)); connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; if (dev_priv->display.write_eld) dev_priv->display.write_eld(connector, crtc); } /** Loads the palette/gamma unit for the CRTC with the prepared values */ void intel_crtc_load_lut(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int palreg = PALETTE(intel_crtc->pipe); int i; /* The clocks have to be on to load the palette. */ if (!crtc->enabled || !intel_crtc->active) return; /* use legacy palette for Ironlake */ if (HAS_PCH_SPLIT(dev)) palreg = LGC_PALETTE(intel_crtc->pipe); for (i = 0; i < 256; i++) { I915_WRITE(palreg + 4 * i, (intel_crtc->lut_r[i] << 16) | (intel_crtc->lut_g[i] << 8) | intel_crtc->lut_b[i]); } } static void i845_update_cursor(struct drm_crtc *crtc, u32 base) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); bool visible = base != 0; u32 cntl; if (intel_crtc->cursor_visible == visible) return; cntl = I915_READ(_CURACNTR); if (visible) { /* On these chipsets we can only modify the base whilst * the cursor is disabled. 
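[Editorial aside] intel_crtc_load_lut() above packs each palette entry as 8-bit R/G/B in a single 32-bit write, after the 16-bit-per-channel gamma ramps have been truncated by the fb/RandR helpers later in this file. A minimal sketch of that packing:

    /*
     * Sketch of the palette packing used by intel_crtc_load_lut() above:
     * 16-bit ramp values truncated to 8 bits and packed as 0x00RRGGBB.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_lut_entry(uint16_t r, uint16_t g, uint16_t b)
    {
        return ((uint32_t)(r >> 8) << 16) | ((uint32_t)(g >> 8) << 8) |
            (b >> 8);
    }

    int main(void)
    {
        /* full-scale white and a mid grey, purely illustrative */
        printf("white 0x%08x, grey 0x%08x\n",
            pack_lut_entry(0xffff, 0xffff, 0xffff),
            pack_lut_entry(0x8000, 0x8000, 0x8000));
        return 0;
    }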
*/ I915_WRITE(_CURABASE, base); cntl &= ~(CURSOR_FORMAT_MASK); /* XXX width must be 64, stride 256 => 0x00 << 28 */ cntl |= CURSOR_ENABLE | CURSOR_GAMMA_ENABLE | CURSOR_FORMAT_ARGB; } else cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); I915_WRITE(_CURACNTR, cntl); intel_crtc->cursor_visible = visible; } static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; bool visible = base != 0; if (intel_crtc->cursor_visible != visible) { uint32_t cntl = I915_READ(CURCNTR(pipe)); if (base) { cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; cntl |= pipe << 28; /* Connect to correct pipe */ } else { cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); cntl |= CURSOR_MODE_DISABLE; } I915_WRITE(CURCNTR(pipe), cntl); intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ I915_WRITE(CURBASE(pipe), base); } static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; bool visible = base != 0; if (intel_crtc->cursor_visible != visible) { uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); if (base) { cntl &= ~CURSOR_MODE; cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; } else { cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); cntl |= CURSOR_MODE_DISABLE; } I915_WRITE(CURCNTR_IVB(pipe), cntl); intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ I915_WRITE(CURBASE_IVB(pipe), base); } /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... 
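[Editorial aside] intel_crtc_update_cursor(), which follows, encodes the cursor position per axis as sign-magnitude rather than two's complement: a negative coordinate sets a sign bit and stores the absolute value. The sketch below shows that encoding in isolation; the shift and sign-bit constants are placeholders, not the real CURPOS layout.

    /*
     * Sketch of the sign-magnitude cursor position encoding used by
     * intel_crtc_update_cursor() below.  Constants are placeholders.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define CUR_X_SHIFT  0           /* placeholder */
    #define CUR_Y_SHIFT  16          /* placeholder */
    #define CUR_SIGN     (1u << 15)  /* placeholder sign bit within a field */

    static uint32_t encode_axis(int v, int shift)
    {
        uint32_t field = 0;

        if (v < 0) {
            field |= CUR_SIGN;  /* negative: set sign, store the magnitude */
            v = -v;
        }
        return (field | (uint32_t)v) << shift;
    }

    int main(void)
    {
        int x = -10, y = 300;  /* cursor partially off the left edge */

        printf("CURPOS = 0x%08x\n",
            encode_axis(x, CUR_X_SHIFT) | encode_axis(y, CUR_Y_SHIFT));
        return 0;
    }

The driver additionally forces the base address to zero once the cursor is entirely off screen, since only the visible-overlap case needs a valid surface.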
*/ static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int x = intel_crtc->cursor_x; int y = intel_crtc->cursor_y; u32 base, pos; bool visible; pos = 0; if (on && crtc->enabled && crtc->fb) { base = intel_crtc->cursor_addr; if (x > (int) crtc->fb->width) base = 0; if (y > (int) crtc->fb->height) base = 0; } else base = 0; if (x < 0) { if (x + intel_crtc->cursor_width < 0) base = 0; pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; x = -x; } pos |= x << CURSOR_X_SHIFT; if (y < 0) { if (y + intel_crtc->cursor_height < 0) base = 0; pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; y = -y; } pos |= y << CURSOR_Y_SHIFT; visible = base != 0; if (!visible && !intel_crtc->cursor_visible) return; - if (IS_IVYBRIDGE(dev)) { + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { I915_WRITE(CURPOS_IVB(pipe), pos); ivb_update_cursor(crtc, base); } else { I915_WRITE(CURPOS(pipe), pos); if (IS_845G(dev) || IS_I865G(dev)) i845_update_cursor(crtc, base); else i9xx_update_cursor(crtc, base); } - - if (visible) - intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); } static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file, uint32_t handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_gem_object *obj; uint32_t addr; int ret; DRM_DEBUG_KMS("\n"); /* if we want to turn off the cursor ignore width and height */ if (!handle) { DRM_DEBUG_KMS("cursor off\n"); addr = 0; obj = NULL; DRM_LOCK(dev); goto finish; } /* Currently we only support 64x64 cursors */ if (width != 64 || height != 64) { DRM_ERROR("we currently only support 64x64 cursors\n"); return -EINVAL; } obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); if (&obj->base == NULL) return -ENOENT; if (obj->base.size < width * height * 4) { DRM_ERROR("buffer is to small\n"); ret = -ENOMEM; goto fail; } /* we only need to pin inside GTT if cursor is non-phy */ DRM_LOCK(dev); if (!dev_priv->info->cursor_needs_physical) { if (obj->tiling_mode) { DRM_ERROR("cursor cannot be tiled\n"); ret = -EINVAL; goto fail_locked; } ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); if (ret) { DRM_ERROR("failed to move cursor bo into the GTT\n"); goto fail_locked; } ret = i915_gem_object_put_fence(obj); if (ret) { DRM_ERROR("failed to release fence for cursor\n"); goto fail_unpin; } addr = obj->gtt_offset; } else { int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_attach_phys_object(dev, obj, (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, align); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; } addr = obj->phys_obj->handle->busaddr; } if (IS_GEN2(dev)) I915_WRITE(CURSIZE, (height << 12) | width); finish: if (intel_crtc->cursor_bo) { if (dev_priv->info->cursor_needs_physical) { if (intel_crtc->cursor_bo != obj) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else - i915_gem_object_unpin(intel_crtc->cursor_bo); + i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } DRM_UNLOCK(dev); intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = obj; intel_crtc->cursor_width = width; intel_crtc->cursor_height = height; intel_crtc_update_cursor(crtc, true); return 0; fail_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); fail_locked: DRM_UNLOCK(dev); fail: drm_gem_object_unreference_unlocked(&obj->base); return ret; } static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_crtc->cursor_x = x; intel_crtc->cursor_y = y; intel_crtc_update_cursor(crtc, true); return 0; } /** Sets the color ramps on behalf of RandR */ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_crtc->lut_r[regno] = red >> 8; intel_crtc->lut_g[regno] = green >> 8; intel_crtc->lut_b[regno] = blue >> 8; } void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); *red = intel_crtc->lut_r[regno] << 8; *green = intel_crtc->lut_g[regno] << 8; *blue = intel_crtc->lut_b[regno] << 8; } static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size) { int end = (start + size > 256) ? 256 : start + size, i; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); for (i = start; i < end; i++) { intel_crtc->lut_r[i] = red[i] >> 8; intel_crtc->lut_g[i] = green[i] >> 8; intel_crtc->lut_b[i] = blue[i] >> 8; } intel_crtc_load_lut(crtc); } /** * Get a pipe with a simple mode set on it for doing load-based monitor * detection. * * It will be up to the load-detect code to adjust the pipe as appropriate for * its requirements. The pipe will be connected to no other encoders. * * Currently this code will only succeed if there is a pipe with no encoders * configured for it. In the future, it could choose to temporarily disable * some outputs to free up a pipe for its use. * * \return crtc, or NULL if no pipes are available. 
*/ /* VESA 640x480x72Hz mode to set on the pipe */ static struct drm_display_mode load_detect_mode = { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), }; static int intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj, struct drm_framebuffer **res) { struct intel_framebuffer *intel_fb; int ret; intel_fb = malloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO); ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) { drm_gem_object_unreference_unlocked(&obj->base); free(intel_fb, DRM_MEM_KMS); return (ret); } *res = &intel_fb->base; return (0); } static u32 intel_framebuffer_pitch_for_width(int width, int bpp) { u32 pitch = howmany(width * bpp, 8); return roundup2(pitch, 64); } static u32 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) { u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); return roundup2(pitch * mode->vdisplay, PAGE_SIZE); } static int intel_framebuffer_create_for_mode(struct drm_device *dev, struct drm_display_mode *mode, int depth, int bpp, struct drm_framebuffer **res) { struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd; obj = i915_gem_alloc_object(dev, intel_framebuffer_size_for_mode(mode, bpp)); if (obj == NULL) return (-ENOMEM); mode_cmd.width = mode->hdisplay; mode_cmd.height = mode->vdisplay; mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp); mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); return (intel_framebuffer_create(dev, &mode_cmd, obj, res)); } static int mode_fits_in_fbdev(struct drm_device *dev, struct drm_display_mode *mode, struct drm_framebuffer **res) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct drm_framebuffer *fb; if (dev_priv->fbdev == NULL) { *res = NULL; return (0); } obj = dev_priv->fbdev->ifb.obj; if (obj == NULL) { *res = NULL; return (0); } fb = &dev_priv->fbdev->ifb.base; if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, fb->bits_per_pixel)) { *res = NULL; return (0); } if (obj->base.size < mode->vdisplay * fb->pitches[0]) { *res = NULL; return (0); } *res = fb; return (0); } bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct drm_display_mode *mode, struct intel_load_detect_pipe *old) { struct intel_crtc *intel_crtc; struct drm_crtc *possible_crtc; struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = NULL; struct drm_device *dev = encoder->dev; struct drm_framebuffer *old_fb; int i = -1, r; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, drm_get_connector_name(connector), encoder->base.id, drm_get_encoder_name(encoder)); /* * Algorithm gets a little messy: * * - if the connector already has an assigned crtc, use it (but make * sure it's on first) * * - try to find the first unused crtc that can drive this connector, * and use that if we find one */ /* See if we already have a CRTC for this connector */ if (encoder->crtc) { crtc = encoder->crtc; intel_crtc = to_intel_crtc(crtc); old->dpms_mode = intel_crtc->dpms_mode; old->load_detect_temp = false; /* Make sure the crtc and connector are running */ if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { struct drm_encoder_helper_funcs *encoder_funcs; struct drm_crtc_helper_funcs *crtc_funcs; crtc_funcs = crtc->helper_private; crtc_funcs->dpms(crtc, 
DRM_MODE_DPMS_ON); encoder_funcs = encoder->helper_private; encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); } return true; } /* Find an unused one (if possible) */ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { i++; if (!(encoder->possible_crtcs & (1 << i))) continue; if (!possible_crtc->enabled) { crtc = possible_crtc; break; } } /* * If we didn't find an unused CRTC, don't use any. */ if (!crtc) { DRM_DEBUG_KMS("no pipe available for load-detect\n"); return false; } encoder->crtc = crtc; connector->encoder = encoder; intel_crtc = to_intel_crtc(crtc); old->dpms_mode = intel_crtc->dpms_mode; old->load_detect_temp = true; old->release_fb = NULL; if (!mode) mode = &load_detect_mode; old_fb = crtc->fb; /* We need a framebuffer large enough to accommodate all accesses * that the plane may generate whilst we perform load detection. * We can not rely on the fbcon either being present (we get called * during its initialisation to detect all boot displays, or it may * not even exist) or that it is large enough to satisfy the * requested mode. */ r = mode_fits_in_fbdev(dev, mode, &crtc->fb); if (crtc->fb == NULL) { DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); r = intel_framebuffer_create_for_mode(dev, mode, 24, 32, &crtc->fb); old->release_fb = crtc->fb; } else DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); if (r != 0) { DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); crtc->fb = old_fb; return false; } if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); crtc->fb = old_fb; return false; } /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); return true; } void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct intel_load_detect_pipe *old) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, drm_get_connector_name(connector), encoder->base.id, drm_get_encoder_name(encoder)); if (old->load_detect_temp) { connector->encoder = NULL; drm_helper_disable_unused_functions(dev); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); return; } /* Switch crtc and encoder back off if necessary */ if (old->dpms_mode != DRM_MODE_DPMS_ON) { encoder_funcs->dpms(encoder, old->dpms_mode); crtc_funcs->dpms(crtc, old->dpms_mode); } } /* Returns the clock of the currently programmed mode of the given pipe. 
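 * The divider fields read back below are fed to intel_clock(), which for
 * the non-Pineview path is assumed to combine them roughly as
 *
 *	m   = 5 * (m1 + 2) + (m2 + 2)
 *	p   = p1 * p2
 *	vco = refclk * m / (n + 2)
 *	dot = vco / p
 *
 * with refclk taken as 96000 kHz (or 48000/66000 kHz for the LVDS cases
 * handled further down); the helper itself lives elsewhere in this file.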
*/ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 dpll = I915_READ(DPLL(pipe)); u32 fp; intel_clock_t clock; if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) fp = I915_READ(FP0(pipe)); else fp = I915_READ(FP1(pipe)); clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev)) { clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; } else { clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; } if (!IS_GEN2(dev)) { if (IS_PINEVIEW(dev)) clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); else clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> DPLL_FPA01_P1_POST_DIV_SHIFT); switch (dpll & DPLL_MODE_MASK) { case DPLLB_MODE_DAC_SERIAL: clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 5 : 10; break; case DPLLB_MODE_LVDS: clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 7 : 14; break; default: DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " "mode\n", (int)(dpll & DPLL_MODE_MASK)); return 0; } /* XXX: Handle the 100Mhz refclk */ intel_clock(dev, 96000, &clock); } else { bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); if (is_lvds) { clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> DPLL_FPA01_P1_POST_DIV_SHIFT); clock.p2 = 14; if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) { /* XXX: might not be 66MHz */ intel_clock(dev, 66000, &clock); } else intel_clock(dev, 48000, &clock); } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; else { clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; } if (dpll & PLL_P2_DIVIDE_BY_4) clock.p2 = 4; else clock.p2 = 2; intel_clock(dev, 48000, &clock); } } /* XXX: It would be nice to validate the clocks, but we can't reuse * i830PllIsValid() because it relies on the xf86_config connector * configuration being accurate, which it isn't necessarily. */ return clock.dot; } /** Returns the currently programmed mode of the given pipe. */ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; struct drm_display_mode *mode; int htot = I915_READ(HTOTAL(pipe)); int hsync = I915_READ(HSYNC(pipe)); int vtot = I915_READ(VTOTAL(pipe)); int vsync = I915_READ(VSYNC(pipe)); mode = malloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO); mode->clock = intel_crtc_clock_get(dev, crtc); mode->hdisplay = (htot & 0xffff) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->hsync_start = (hsync & 0xffff) + 1; mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; mode->vdisplay = (vtot & 0xffff) + 1; mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; mode->vsync_start = (vsync & 0xffff) + 1; mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; drm_mode_set_name(mode); - drm_mode_set_crtcinfo(mode, 0); return mode; } #define GPU_IDLE_TIMEOUT (500 /* ms */ * 1000 / hz) /* When this timer fires, we've been idle for awhile */ static void intel_gpu_idle_timer(void *arg) { struct drm_device *dev = arg; drm_i915_private_t *dev_priv = dev->dev_private; if (!list_empty(&dev_priv->mm.active_list)) { /* Still processing requests, so just re-arm the timer. 
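 * Note that the callout itself only re-arms (via the callout_schedule()
 * below) or clears dev_priv->busy and hands off to dev_priv->idle_task;
 * the actual clock adjustment happens later in intel_idle_update(),
 * presumably because that path takes DRM_LOCK, which is not safe to
 * acquire from callout (softclock) context.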
*/ callout_schedule(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT); return; } dev_priv->busy = false; taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task); } #define CRTC_IDLE_TIMEOUT (1000 /* ms */ * 1000 / hz) static void intel_crtc_idle_timer(void *arg) { struct intel_crtc *intel_crtc = arg; struct drm_crtc *crtc = &intel_crtc->base; drm_i915_private_t *dev_priv = crtc->dev->dev_private; struct intel_framebuffer *intel_fb; intel_fb = to_intel_framebuffer(crtc->fb); if (intel_fb && intel_fb->obj->active) { /* The framebuffer is still being accessed by the GPU. */ callout_schedule(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT); return; } intel_crtc->busy = false; taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task); } static void intel_increase_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int dpll_reg = DPLL(pipe); int dpll; if (HAS_PCH_SPLIT(dev)) return; if (!dev_priv->lvds_downclock_avail) return; dpll = I915_READ(dpll_reg); if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { DRM_DEBUG_DRIVER("upclocking LVDS\n"); assert_panel_unlocked(dev_priv, pipe); dpll &= ~DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); intel_wait_for_vblank(dev, pipe); dpll = I915_READ(dpll_reg); if (dpll & DISPLAY_RATE_SELECT_FPA1) DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); } /* Schedule downclock */ callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer, intel_crtc); } static void intel_decrease_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); if (HAS_PCH_SPLIT(dev)) return; if (!dev_priv->lvds_downclock_avail) return; /* * Since this is called by a timer, we should never get here in * the manual case. */ if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { int pipe = intel_crtc->pipe; int dpll_reg = DPLL(pipe); u32 dpll; DRM_DEBUG_DRIVER("downclocking LVDS\n"); assert_panel_unlocked(dev_priv, pipe); dpll = I915_READ(dpll_reg); dpll |= DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); intel_wait_for_vblank(dev, pipe); dpll = I915_READ(dpll_reg); if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); } } /** * intel_idle_update - adjust clocks for idleness * @work: work struct * * Either the GPU or display (or both) went idle. Check the busy status * here and adjust the CRTC and GPU clocks as necessary. */ static void intel_idle_update(void *arg, int pending) { drm_i915_private_t *dev_priv = arg; struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; if (!i915_powersave) return; DRM_LOCK(dev); i915_update_gfx_val(dev_priv); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) continue; intel_crtc = to_intel_crtc(crtc); if (!intel_crtc->busy) intel_decrease_pllclock(crtc); } DRM_UNLOCK(dev); } /** * intel_mark_busy - mark the GPU and possibly the display busy * @dev: drm device * @obj: object we're operating on * * Callers can use this function to indicate that the GPU is busy processing * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout * buffer), we'll also mark the display as busy, so we know to increase its * clock frequency. 
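 *
 * The resulting busy/idle cycle, as wired up in this file, is roughly:
 *
 *	intel_mark_busy(dev, obj)   - also called from intel_crtc_page_flip();
 *	    upclocks the matching CRTC (intel_increase_pllclock()) and
 *	    (re)arms the idle callouts;
 *	~500 ms of GPU idleness     - intel_gpu_idle_timer() fires and queues
 *	    dev_priv->idle_task;
 *	~1 s of scanout idleness    - intel_crtc_idle_timer() does the same
 *	    for an individual CRTC;
 *	idle_task                   - intel_idle_update() downclocks the CRTCs
 *	    that are no longer busy (intel_decrease_pllclock()).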
*/ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = NULL; struct intel_framebuffer *intel_fb; struct intel_crtc *intel_crtc; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return; - if (!dev_priv->busy) + if (!dev_priv->busy) { + intel_sanitize_pm(dev); dev_priv->busy = true; - else + } else callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT, intel_gpu_idle_timer, dev); + if (obj == NULL) + return; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (!crtc->fb) continue; intel_crtc = to_intel_crtc(crtc); intel_fb = to_intel_framebuffer(crtc->fb); if (intel_fb->obj == obj) { if (!intel_crtc->busy) { /* Non-busy -> busy, upclock */ intel_increase_pllclock(crtc); intel_crtc->busy = true; } else { /* Busy -> busy, put off timer */ callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer, intel_crtc); } } } } static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_unpin_work *work; mtx_lock(&dev->event_lock); work = intel_crtc->unpin_work; intel_crtc->unpin_work = NULL; mtx_unlock(&dev->event_lock); if (work) { taskqueue_cancel(dev_priv->tq, &work->task, NULL); taskqueue_drain(dev_priv->tq, &work->task); free(work, DRM_MEM_KMS); } drm_crtc_cleanup(crtc); free(intel_crtc, DRM_MEM_KMS); } static void intel_unpin_work_fn(void *arg, int pending) { struct intel_unpin_work *work = arg; struct drm_device *dev; dev = work->dev; DRM_LOCK(dev); intel_unpin_fb_obj(work->old_fb_obj); drm_gem_object_unreference(&work->pending_flip_obj->base); drm_gem_object_unreference(&work->old_fb_obj->base); intel_update_fbc(work->dev); DRM_UNLOCK(dev); free(work, DRM_MEM_KMS); } static void do_intel_finish_page_flip(struct drm_device *dev, struct drm_crtc *crtc) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; struct drm_i915_gem_object *obj; struct drm_pending_vblank_event *e; struct timeval tnow, tvbl; /* Ignore early vblank irqs */ if (intel_crtc == NULL) return; microtime(&tnow); mtx_lock(&dev->event_lock); work = intel_crtc->unpin_work; if (work == NULL || !work->pending) { mtx_unlock(&dev->event_lock); return; } intel_crtc->unpin_work = NULL; if (work->event) { e = work->event; e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); /* Called before vblank count and timestamps have * been updated for the vblank interval of flip * completion? Need to increment vblank count and * add one videorefresh duration to returned timestamp * to account for this. We assume this happened if we * get called over 0.9 frame durations after the last * timestamped vblank. * * This calculation can not be used with vrefresh rates * below 5Hz (10Hz to be on the safe side) without * promoting to 64 integers. 
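 * As a worked example, at 60Hz framedur_ns is about 16,666,667ns, so the
 * "10 * delta > 9 * framedur_ns" test below treats any completion handled
 * more than ~15ms (0.9 frame) after the last timestamped vblank as
 * belonging to the previous frame. The 5Hz limit presumably comes from
 * 32-bit arithmetic: at 5Hz one frame is 200,000,000ns and ten times the
 * delta approaches 2^31, so slower rates would need the intermediate
 * products promoted to 64-bit.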
*/ if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > 9 * crtc->framedur_ns) { e->event.sequence++; tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + crtc->framedur_ns); } e->event.tv_sec = tvbl.tv_sec; e->event.tv_usec = tvbl.tv_usec; list_add_tail(&e->base.link, &e->base.file_priv->event_list); drm_event_wakeup(&e->base); } drm_vblank_put(dev, intel_crtc->pipe); obj = work->old_fb_obj; atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane); if (atomic_load_acq_int(&obj->pending_flip) == 0) wakeup(&obj->pending_flip); mtx_unlock(&dev->event_lock); taskqueue_enqueue(dev_priv->tq, &work->task); CTR2(KTR_DRM, "i915_flip_complete %d %p", intel_crtc->plane, work->pending_flip_obj); } void intel_finish_page_flip(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; do_intel_finish_page_flip(dev, crtc); } void intel_finish_page_flip_plane(struct drm_device *dev, int plane) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; do_intel_finish_page_flip(dev, crtc); } void intel_prepare_page_flip(struct drm_device *dev, int plane) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); mtx_lock(&dev->event_lock); if (intel_crtc->unpin_work) { if ((++intel_crtc->unpin_work->pending) > 1) DRM_ERROR("Prepared flip multiple times\n"); } else { DRM_DEBUG("preparing flip with no unpin work?\n"); } mtx_unlock(&dev->event_lock); } static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); unsigned long offset; u32 flip_mask; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; int ret; - ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) - goto out; + goto err; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; - ret = BEGIN_LP_RING(6); + ret = intel_ring_begin(ring, 6); if (ret) - goto out; + goto err_unpin; /* Can't queue multiple flips, so wait for the previous * one to finish before executing the next. 
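 * The err_unpin/err labels introduced here (and in the other queue_flip
 * variants below) make the partial-failure path explicit: if
 * intel_ring_begin() fails after intel_pin_and_fence_fb_obj() has
 * succeeded, the object is now unpinned via intel_unpin_fb_obj() before
 * returning, whereas the old single "out:" label returned with the pin
 * still held.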
*/ if (intel_crtc->plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); - OUT_RING(MI_NOOP); - OUT_RING(MI_DISPLAY_FLIP | - MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - OUT_RING(fb->pitches[0]); - OUT_RING(obj->gtt_offset + offset); - OUT_RING(0); /* aux display base address, unused */ - ADVANCE_LP_RING(); -out: + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, obj->gtt_offset + offset); + intel_ring_emit(ring, 0); /* aux display base address, unused */ + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } static int intel_gen3_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); unsigned long offset; u32 flip_mask; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; int ret; - ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) - goto out; + goto err; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; - ret = BEGIN_LP_RING(6); + ret = intel_ring_begin(ring, 6); if (ret) - goto out; + goto err_unpin; if (intel_crtc->plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); - OUT_RING(MI_NOOP); - OUT_RING(MI_DISPLAY_FLIP_I915 | - MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - OUT_RING(fb->pitches[0]); - OUT_RING(obj->gtt_offset + offset); - OUT_RING(MI_NOOP); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, obj->gtt_offset + offset); + intel_ring_emit(ring, MI_NOOP); - ADVANCE_LP_RING(); -out: + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } static int intel_gen4_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; int ret; - ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) - goto out; + goto err; - ret = BEGIN_LP_RING(4); + ret = intel_ring_begin(ring, 4); if (ret) - goto out; + goto err_unpin; /* i965+ uses the linear or tiled offsets from the * Display Registers (which do not change across a page-flip) * so we need only reprogram the base address. */ - OUT_RING(MI_DISPLAY_FLIP | - MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - OUT_RING(fb->pitches[0]); - OUT_RING(obj->gtt_offset | obj->tiling_mode); + intel_ring_emit(ring, MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. 
* pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - OUT_RING(pf | pipesrc); - ADVANCE_LP_RING(); -out: + intel_ring_emit(ring, pf | pipesrc); + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } static int intel_gen6_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; uint32_t pf, pipesrc; int ret; - ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) - goto out; + goto err; - ret = BEGIN_LP_RING(4); + ret = intel_ring_begin(ring, 4); if (ret) - goto out; + goto err_unpin; - OUT_RING(MI_DISPLAY_FLIP | - MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - OUT_RING(fb->pitches[0] | obj->tiling_mode); - OUT_RING(obj->gtt_offset); + intel_ring_emit(ring, MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); + intel_ring_emit(ring, obj->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page * flipping with a non-native mode, and worse causes a normal * modeset to fail. * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - OUT_RING(pf | pipesrc); - ADVANCE_LP_RING(); -out: + intel_ring_emit(ring, pf | pipesrc); + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } /* * On gen7 we currently use the blit ring because (in early silicon at least) * the render ring doesn't give us interrupts for page flip completion, which * means clients will hang after the first flip is queued. Fortunately the * blit ring generates interrupts properly, so use it instead.
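 * Which variant is used is not decided here; the hook is presumably
 * assigned per generation in intel_init_display(), e.g. (hypothetical
 * sketch)
 *
 *	if (IS_IVYBRIDGE(dev))
 *		dev_priv->display.queue_flip = intel_gen7_queue_flip;
 *
 * with intel_default_queue_flip() below (returning -ENODEV) as the
 * fallback for hardware where flips are not wired up.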
*/ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_ring_buffer *ring = &dev_priv->rings[BCS]; int ret; ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) - goto out; + goto err; ret = intel_ring_begin(ring, 4); if (ret) - goto out; + goto err_unpin; intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); intel_ring_emit(ring, (obj->gtt_offset)); intel_ring_emit(ring, (MI_NOOP)); intel_ring_advance(ring); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } static int intel_default_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj) { return -ENODEV; } static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; int ret; work = malloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO); work->event = event; work->dev = crtc->dev; intel_fb = to_intel_framebuffer(crtc->fb); work->old_fb_obj = intel_fb->obj; TASK_INIT(&work->task, 0, intel_unpin_work_fn, work); ret = drm_vblank_get(dev, intel_crtc->pipe); if (ret) goto free_work; /* We borrow the event spin lock for protecting unpin_work */ mtx_lock(&dev->event_lock); if (intel_crtc->unpin_work) { mtx_unlock(&dev->event_lock); free(work, DRM_MEM_KMS); drm_vblank_put(dev, intel_crtc->pipe); DRM_DEBUG("flip queue: crtc already busy\n"); return -EBUSY; } intel_crtc->unpin_work = work; mtx_unlock(&dev->event_lock); intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; DRM_LOCK(dev); /* Reference the objects for the scheduled work. */ drm_gem_object_reference(&work->old_fb_obj->base); drm_gem_object_reference(&obj->base); crtc->fb = fb; work->pending_flip_obj = obj; work->enable_stall_check = true; /* Block clients from rendering to the new back buffer until * the flip occurs and the object is no longer visible. 
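 * The blocking works through work->old_fb_obj->pending_flip: the bit for
 * this plane is set just below and cleared again (with a wakeup() on
 * &obj->pending_flip) in do_intel_finish_page_flip() once the flip has
 * completed, so a waiter would roughly do (hypothetical sketch)
 *
 *	while (atomic_load_acq_int(&obj->pending_flip) != 0)
 *		(void)tsleep(&obj->pending_flip, 0, "915flp", hz);
 *
 * before touching the buffer again.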
*/ atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane); ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); if (ret) goto cleanup_pending; intel_disable_fbc(dev); + intel_mark_busy(dev, obj); DRM_UNLOCK(dev); CTR2(KTR_DRM, "i915_flip_request %d %p", intel_crtc->plane, obj); return 0; cleanup_pending: atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane); drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); DRM_UNLOCK(dev); mtx_lock(&dev->event_lock); intel_crtc->unpin_work = NULL; mtx_unlock(&dev->event_lock); drm_vblank_put(dev, intel_crtc->pipe); free_work: free(work, DRM_MEM_KMS); return ret; } static void intel_sanitize_modesetting(struct drm_device *dev, int pipe, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg, val; + int i; /* Clear any frame start delays used for debugging left by the BIOS */ - for_each_pipe(pipe) { - reg = PIPECONF(pipe); + for_each_pipe(i) { + reg = PIPECONF(i); I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); } if (HAS_PCH_SPLIT(dev)) return; /* Who knows what state these registers were left in by the BIOS or * grub? * * If we leave the registers in a conflicting state (e.g. with the * display plane reading from the other pipe than the one we intend * to use) then when we attempt to teardown the active mode, we will * not disable the pipes and planes in the correct order -- leaving * a plane reading from a disabled pipe and possibly leading to * undefined behaviour. */ reg = DSPCNTR(plane); val = I915_READ(reg); if ((val & DISPLAY_PLANE_ENABLE) == 0) return; if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) return; /* This display plane is active and attached to the other CPU pipe. */ pipe = !pipe; /* Disable the plane and wait for it to stop reading from the pipe. */ intel_disable_plane(dev_priv, plane, pipe); intel_disable_pipe(dev_priv, pipe); } static void intel_crtc_reset(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); /* Reset flags back to the 'unknown' status so that they * will be correctly set on the initial modeset. */ intel_crtc->dpms_mode = -1; /* We need to fix up any BIOS configuration that conflicts with * our expectations. 
*/ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); } static struct drm_crtc_helper_funcs intel_helper_funcs = { .dpms = intel_crtc_dpms, .mode_fixup = intel_crtc_mode_fixup, .mode_set = intel_crtc_mode_set, .mode_set_base = intel_pipe_set_base, .mode_set_base_atomic = intel_pipe_set_base_atomic, .load_lut = intel_crtc_load_lut, .disable = intel_crtc_disable, }; static const struct drm_crtc_funcs intel_crtc_funcs = { .reset = intel_crtc_reset, .cursor_set = intel_crtc_cursor_set, .cursor_move = intel_crtc_cursor_move, .gamma_set = intel_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, .destroy = intel_crtc_destroy, .page_flip = intel_crtc_page_flip, }; +static void intel_pch_pll_init(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + + if (dev_priv->num_pch_pll == 0) { + DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n"); + return; + } + + for (i = 0; i < dev_priv->num_pch_pll; i++) { + dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i); + dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i); + dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i); + } +} + static void intel_crtc_init(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc; int i; intel_crtc = malloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), DRM_MEM_KMS, M_WAITOK | M_ZERO); drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); for (i = 0; i < 256; i++) { intel_crtc->lut_r[i] = i; intel_crtc->lut_g[i] = i; intel_crtc->lut_b[i] = i; } /* Swap pipes & planes for FBC on pre-965 */ intel_crtc->pipe = pipe; intel_crtc->plane = pipe; if (IS_MOBILE(dev) && IS_GEN3(dev)) { DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); intel_crtc->plane = !pipe; } KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) && dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL, ("plane_to_crtc is already initialized")); dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; intel_crtc_reset(&intel_crtc->base); intel_crtc->active = true; /* force the pipe off on setup_init_config */ intel_crtc->bpp = 24; /* default for pre-Ironlake */ if (HAS_PCH_SPLIT(dev)) { - if (pipe == 2 && IS_IVYBRIDGE(dev)) - intel_crtc->no_pll = true; intel_helper_funcs.prepare = ironlake_crtc_prepare; intel_helper_funcs.commit = ironlake_crtc_commit; } else { intel_helper_funcs.prepare = i9xx_crtc_prepare; intel_helper_funcs.commit = i9xx_crtc_commit; } drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); intel_crtc->busy = false; callout_init(&intel_crtc->idle_callout, CALLOUT_MPSAFE); } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file) { - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; struct drm_mode_object *drmmode_obj; struct intel_crtc *crtc; - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, DRM_MODE_OBJECT_CRTC); if (!drmmode_obj) { DRM_ERROR("no such CRTC id\n"); return -EINVAL; } crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); pipe_from_crtc_id->pipe = crtc->pipe; return 0; } static int intel_encoder_clones(struct drm_device *dev, int type_mask) { struct 
intel_encoder *encoder; int index_mask = 0; int entry = 0; list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { if (type_mask & encoder->clone_mask) index_mask |= (1 << entry); entry++; } return index_mask; } static bool has_edp_a(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (!IS_MOBILE(dev)) return false; if ((I915_READ(DP_A) & DP_DETECTED) == 0) return false; if (IS_GEN5(dev) && (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) return false; return true; } static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; bool dpd_is_edp = false; bool has_lvds; has_lvds = intel_lvds_init(dev); if (!has_lvds && !HAS_PCH_SPLIT(dev)) { /* disable the panel fitter on everything but LVDS */ I915_WRITE(PFIT_CONTROL, 0); } if (HAS_PCH_SPLIT(dev)) { dpd_is_edp = intel_dpd_is_edp(dev); if (has_edp_a(dev)) intel_dp_init(dev, DP_A); if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D); } intel_crt_init(dev); - if (HAS_PCH_SPLIT(dev)) { + if (IS_HASWELL(dev)) { int found; + /* Haswell uses DDI functions to detect digital outputs */ + found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; + /* DDI A only supports eDP */ + if (found) + intel_ddi_init(dev, PORT_A); + + /* DDI B, C and D detection is indicated by the SFUSE_STRAP + * register */ + found = I915_READ(SFUSE_STRAP); + + if (found & SFUSE_STRAP_DDIB_DETECTED) + intel_ddi_init(dev, PORT_B); + if (found & SFUSE_STRAP_DDIC_DETECTED) + intel_ddi_init(dev, PORT_C); + if (found & SFUSE_STRAP_DDID_DETECTED) + intel_ddi_init(dev, PORT_D); + } else if (HAS_PCH_SPLIT(dev)) { + int found; + DRM_DEBUG_KMS( "HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n", (I915_READ(HDMIB) & PORT_DETECTED) != 0, (I915_READ(PCH_DP_B) & DP_DETECTED) != 0, (I915_READ(HDMIC) & PORT_DETECTED) != 0, (I915_READ(HDMID) & PORT_DETECTED) != 0, (I915_READ(PCH_DP_C) & DP_DETECTED) != 0, (I915_READ(PCH_DP_D) & DP_DETECTED) != 0, (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0); if (I915_READ(HDMIB) & PORT_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ - found = intel_sdvo_init(dev, PCH_SDVOB); + found = intel_sdvo_init(dev, PCH_SDVOB, true); if (!found) intel_hdmi_init(dev, HDMIB); if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_B); } if (I915_READ(HDMIC) & PORT_DETECTED) intel_hdmi_init(dev, HDMIC); if (I915_READ(HDMID) & PORT_DETECTED) intel_hdmi_init(dev, HDMID); if (I915_READ(PCH_DP_C) & DP_DETECTED) intel_dp_init(dev, PCH_DP_C); if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; if (I915_READ(SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); - found = intel_sdvo_init(dev, SDVOB); + found = intel_sdvo_init(dev, SDVOB, true); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); intel_hdmi_init(dev, SDVOB); } if (!found && SUPPORTS_INTEGRATED_DP(dev)) { DRM_DEBUG_KMS("probing DP_B\n"); intel_dp_init(dev, DP_B); } } /* Before G4X SDVOC doesn't have its own detect register */ if (I915_READ(SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOC\n"); - found = intel_sdvo_init(dev, SDVOC); + found = intel_sdvo_init(dev, SDVOC, false); } if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { if (SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); intel_hdmi_init(dev, SDVOC); } if 
(SUPPORTS_INTEGRATED_DP(dev)) { DRM_DEBUG_KMS("probing DP_C\n"); intel_dp_init(dev, DP_C); } } if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) { DRM_DEBUG_KMS("probing DP_D\n"); intel_dp_init(dev, DP_D); } } else if (IS_GEN2(dev)) { #if 1 KIB_NOTYET(); #else intel_dvo_init(dev); #endif } if (SUPPORTS_TV(dev)) intel_tv_init(dev); list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { encoder->base.possible_crtcs = encoder->crtc_mask; encoder->base.possible_clones = intel_encoder_clones(dev, encoder->clone_mask); } /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); if (HAS_PCH_SPLIT(dev)) ironlake_init_pch_refclk(dev); } static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); drm_framebuffer_cleanup(fb); drm_gem_object_unreference_unlocked(&intel_fb->obj->base); free(intel_fb, DRM_MEM_KMS); } static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file, unsigned int *handle) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; return drm_gem_handle_create(file, &obj->base, handle); } static const struct drm_framebuffer_funcs intel_fb_funcs = { .destroy = intel_user_framebuffer_destroy, .create_handle = intel_user_framebuffer_create_handle, }; int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *intel_fb, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj) { int ret; if (obj->tiling_mode == I915_TILING_Y) return -EINVAL; if (mode_cmd->pitches[0] & 63) return -EINVAL; switch (mode_cmd->pixel_format) { case DRM_FORMAT_RGB332: case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: /* RGB formats are common across chipsets */ break; case DRM_FORMAT_YUYV: case DRM_FORMAT_UYVY: case DRM_FORMAT_YVYU: case DRM_FORMAT_VYUY: break; default: DRM_DEBUG_KMS("unsupported pixel format %u\n", mode_cmd->pixel_format); return -EINVAL; } ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { DRM_ERROR("framebuffer init failed %d\n", ret); return ret; } drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; return 0; } static int intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_framebuffer **res) { struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handles[0])); if (&obj->base == NULL) return (-ENOENT); return (intel_framebuffer_create(dev, mode_cmd, obj, res)); } static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, .output_poll_changed = intel_fb_output_poll_changed, }; -static struct drm_i915_gem_object * -intel_alloc_context_page(struct drm_device *dev) -{ - struct drm_i915_gem_object *ctx; - int ret; - - DRM_LOCK_ASSERT(dev); - - ctx = i915_gem_alloc_object(dev, 4096); - if (!ctx) { - DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); - return NULL; - } - - ret = i915_gem_object_pin(ctx, 4096, true); - if (ret) { - DRM_ERROR("failed to pin power context: %d\n", ret); - goto err_unref; - } - - ret = i915_gem_object_set_to_gtt_domain(ctx, 1); - if (ret) { - DRM_ERROR("failed to set-domain on power context: %d\n", ret); - goto err_unpin; - } - - return ctx; - -err_unpin: - 
i915_gem_object_unpin(ctx); -err_unref: - drm_gem_object_unreference(&ctx->base); - DRM_UNLOCK(dev); - return NULL; -} - -bool ironlake_set_drps(struct drm_device *dev, u8 val) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u16 rgvswctl; - - rgvswctl = I915_READ16(MEMSWCTL); - if (rgvswctl & MEMCTL_CMD_STS) { - DRM_DEBUG("gpu busy, RCS change rejected\n"); - return false; /* still busy with another command */ - } - - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | - (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - I915_WRITE16(MEMSWCTL, rgvswctl); - POSTING_READ16(MEMSWCTL); - - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE16(MEMSWCTL, rgvswctl); - - return true; -} - -void ironlake_enable_drps(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 rgvmodectl = I915_READ(MEMMODECTL); - u8 fmax, fmin, fstart, vstart; - - /* Enable temp reporting */ - I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); - I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); - - /* 100ms RC evaluation intervals */ - I915_WRITE(RCUPEI, 100000); - I915_WRITE(RCDNEI, 100000); - - /* Set max/min thresholds to 90ms and 80ms respectively */ - I915_WRITE(RCBMAXAVG, 90000); - I915_WRITE(RCBMINAVG, 80000); - - I915_WRITE(MEMIHYST, 1); - - /* Set up min, max, and cur for interrupt handling */ - fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; - fmin = (rgvmodectl & MEMMODE_FMIN_MASK); - fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> - MEMMODE_FSTART_SHIFT; - - vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; - - dev_priv->fmax = fmax; /* IPS callback will increase this */ - dev_priv->fstart = fstart; - - dev_priv->max_delay = fstart; - dev_priv->min_delay = fmin; - dev_priv->cur_delay = fstart; - - DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n", - fmax, fmin, fstart); - - I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); - - /* - * Interrupts will be enabled in ironlake_irq_postinstall - */ - - I915_WRITE(VIDSTART, vstart); - POSTING_READ(VIDSTART); - - rgvmodectl |= MEMMODE_SWMODE_EN; - I915_WRITE(MEMMODECTL, rgvmodectl); - - if (_intel_wait_for(dev, - (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10, - 1, "915per")) - DRM_ERROR("stuck trying to change perf mode\n"); - pause("915dsp", 1); - - ironlake_set_drps(dev, fstart); - - dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + - I915_READ(0x112e0); - dev_priv->last_time1 = jiffies_to_msecs(jiffies); - dev_priv->last_count2 = I915_READ(0x112f4); - nanotime(&dev_priv->last_time2); -} - -void ironlake_disable_drps(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u16 rgvswctl = I915_READ16(MEMSWCTL); - - /* Ack interrupts, disable EFC interrupt */ - I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); - I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); - I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); - I915_WRITE(DEIIR, DE_PCU_EVENT); - I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); - - /* Go back to the starting frequency */ - ironlake_set_drps(dev, dev_priv->fstart); - pause("915dsp", 1); - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE(MEMSWCTL, rgvswctl); - pause("915dsp", 1); - -} - -void gen6_set_rps(struct drm_device *dev, u8 val) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 swreq; - - swreq = (val & 0x3ff) << 25; - I915_WRITE(GEN6_RPNSWREQ, swreq); -} - -void gen6_disable_rps(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - 
I915_WRITE(GEN6_RPNSWREQ, 1 << 31); - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); - I915_WRITE(GEN6_PMIER, 0); - /* Complete PM interrupt masking here doesn't race with the rps work - * item again unmasking PM interrupts because that is using a different - * register (PMIMR) to mask PM interrupts. The only risk is in leaving - * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ - - mtx_lock(&dev_priv->rps_lock); - dev_priv->pm_iir = 0; - mtx_unlock(&dev_priv->rps_lock); - - I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); -} - -static unsigned long intel_pxfreq(u32 vidfreq) -{ - unsigned long freq; - int div = (vidfreq & 0x3f0000) >> 16; - int post = (vidfreq & 0x3000) >> 12; - int pre = (vidfreq & 0x7); - - if (!pre) - return 0; - - freq = ((div * 133333) / ((1<dev_private; - u32 lcfuse; - u8 pxw[16]; - int i; - - /* Disable to program */ - I915_WRITE(ECR, 0); - POSTING_READ(ECR); - - /* Program energy weights for various events */ - I915_WRITE(SDEW, 0x15040d00); - I915_WRITE(CSIEW0, 0x007f0000); - I915_WRITE(CSIEW1, 0x1e220004); - I915_WRITE(CSIEW2, 0x04000004); - - for (i = 0; i < 5; i++) - I915_WRITE(PEW + (i * 4), 0); - for (i = 0; i < 3; i++) - I915_WRITE(DEW + (i * 4), 0); - - /* Program P-state weights to account for frequency power adjustment */ - for (i = 0; i < 16; i++) { - u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); - unsigned long freq = intel_pxfreq(pxvidfreq); - unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; - unsigned long val; - - val = vid * vid; - val *= (freq / 1000); - val *= 255; - val /= (127*127*900); - if (val > 0xff) - DRM_ERROR("bad pxval: %ld\n", val); - pxw[i] = val; - } - /* Render standby states get 0 weight */ - pxw[14] = 0; - pxw[15] = 0; - - for (i = 0; i < 4; i++) { - u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | - (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); - I915_WRITE(PXW + (i * 4), val); - } - - /* Adjust magic regs to magic values (more experimental results) */ - I915_WRITE(OGW0, 0); - I915_WRITE(OGW1, 0); - I915_WRITE(EG0, 0x00007f00); - I915_WRITE(EG1, 0x0000000e); - I915_WRITE(EG2, 0x000e0000); - I915_WRITE(EG3, 0x68000300); - I915_WRITE(EG4, 0x42000000); - I915_WRITE(EG5, 0x00140031); - I915_WRITE(EG6, 0); - I915_WRITE(EG7, 0); - - for (i = 0; i < 8; i++) - I915_WRITE(PXWL + (i * 4), 0); - - /* Enable PMON + select events */ - I915_WRITE(ECR, 0x80000019); - - lcfuse = I915_READ(LCFUSE02); - - dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); -} - -static int intel_enable_rc6(struct drm_device *dev) -{ - /* - * Respect the kernel parameter if it is set - */ - if (i915_enable_rc6 >= 0) - return i915_enable_rc6; - - /* - * Disable RC6 on Ironlake - */ - if (INTEL_INFO(dev)->gen == 5) - return 0; - - /* - * Enable rc6 on Sandybridge if DMA remapping is disabled - */ - if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER( - "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n", - intel_iommu_enabled ? "true" : "false", - !intel_iommu_enabled ? "en" : "dis"); - return (intel_iommu_enabled ? 
0 : INTEL_RC6_ENABLE); - } - DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); - return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); -} - -void gen6_enable_rps(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); - u32 pcu_mbox, rc6_mask = 0; - u32 gtfifodbg; - int cur_freq, min_freq, max_freq; - int rc6_mode; - int i; - - /* Here begins a magic sequence of register writes to enable - * auto-downclocking. - * - * Perhaps there might be some value in exposing these to - * userspace... - */ - I915_WRITE(GEN6_RC_STATE, 0); - DRM_LOCK(dev); - - /* Clear the DBG now so we don't confuse earlier errors */ - if ((gtfifodbg = I915_READ(GTFIFODBG))) { - DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - gen6_gt_force_wake_get(dev_priv); - - /* disable the counters and set deterministic thresholds */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); - I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - - for (i = 0; i < I915_NUM_RINGS; i++) - I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10); - - I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); - I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); - I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ - - rc6_mode = intel_enable_rc6(dev_priv->dev); - if (rc6_mode & INTEL_RC6_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; - - if (rc6_mode & INTEL_RC6p_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; - - if (rc6_mode & INTEL_RC6pp_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; - - DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", - (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", - (rc6_mode & INTEL_RC6pp_ENABLE) ? 
"on" : "off"); - - I915_WRITE(GEN6_RC_CONTROL, - rc6_mask | - GEN6_RC_CTL_EI_MODE(1) | - GEN6_RC_CTL_HW_ENABLE); - - I915_WRITE(GEN6_RPNSWREQ, - GEN6_FREQUENCY(10) | - GEN6_OFFSET(0) | - GEN6_AGGRESSIVE_TURBO); - I915_WRITE(GEN6_RC_VIDEO_FREQ, - GEN6_FREQUENCY(12)); - - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - 18 << 24 | - 6 << 16); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); - I915_WRITE(GEN6_RP_UP_EI, 100000); - I915_WRITE(GEN6_RP_DOWN_EI, 5000000); - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_CONT); - - if (_intel_wait_for(dev, - (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, - 1, "915pr1")) - DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); - - I915_WRITE(GEN6_PCODE_DATA, 0); - I915_WRITE(GEN6_PCODE_MAILBOX, - GEN6_PCODE_READY | - GEN6_PCODE_WRITE_MIN_FREQ_TABLE); - if (_intel_wait_for(dev, - (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, - 1, "915pr2")) - DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); - - min_freq = (rp_state_cap & 0xff0000) >> 16; - max_freq = rp_state_cap & 0xff; - cur_freq = (gt_perf_status & 0xff00) >> 8; - - /* Check for overclock support */ - if (_intel_wait_for(dev, - (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, - 1, "915pr3")) - DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); - pcu_mbox = I915_READ(GEN6_PCODE_DATA); - if (_intel_wait_for(dev, - (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, - 1, "915pr4")) - DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); - if (pcu_mbox & (1<<31)) { /* OC supported */ - max_freq = pcu_mbox & 0xff; - DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); - } - - /* In units of 100MHz */ - dev_priv->max_delay = max_freq; - dev_priv->min_delay = min_freq; - dev_priv->cur_delay = cur_freq; - - /* requires MSI enabled */ - I915_WRITE(GEN6_PMIER, - GEN6_PM_MBOX_EVENT | - GEN6_PM_THERMAL_EVENT | - GEN6_PM_RP_DOWN_TIMEOUT | - GEN6_PM_RP_UP_THRESHOLD | - GEN6_PM_RP_DOWN_THRESHOLD | - GEN6_PM_RP_UP_EI_EXPIRED | - GEN6_PM_RP_DOWN_EI_EXPIRED); - mtx_lock(&dev_priv->rps_lock); - if (dev_priv->pm_iir != 0) - printf("pm_iir %x\n", dev_priv->pm_iir); - I915_WRITE(GEN6_PMIMR, 0); - mtx_unlock(&dev_priv->rps_lock); - /* enable all PM interrupts */ - I915_WRITE(GEN6_PMINTRMSK, 0); - - gen6_gt_force_wake_put(dev_priv); - DRM_UNLOCK(dev); -} - -void gen6_update_ring_freq(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev; - int min_freq = 15; - int gpu_freq, ia_freq, max_ia_freq; - int scaling_factor = 180; - uint64_t tsc_freq; - - dev = dev_priv->dev; -#if 0 - max_ia_freq = cpufreq_quick_get_max(0); - /* - * Default to measured freq if none found, PCU will ensure we don't go - * over - */ - if (!max_ia_freq) - max_ia_freq = tsc_freq; - - /* Convert from Hz to MHz */ - max_ia_freq /= 1000; -#else - tsc_freq = atomic_load_acq_64(&tsc_freq); - max_ia_freq = tsc_freq / 1000 / 1000; -#endif - - DRM_LOCK(dev); - - /* - * For each potential GPU frequency, load a ring frequency we'd like - * to use for memory access. We do this by specifying the IA frequency - * the PCU should use as a reference to determine the ring frequency. 
- */ - for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; - gpu_freq--) { - int diff = dev_priv->max_delay - gpu_freq; - int d; - - /* - * For GPU frequencies less than 750MHz, just use the lowest - * ring freq. - */ - if (gpu_freq < min_freq) - ia_freq = 800; - else - ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); - d = 100; - ia_freq = (ia_freq + d / 2) / d; - - I915_WRITE(GEN6_PCODE_DATA, - (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | - gpu_freq); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | - GEN6_PCODE_WRITE_MIN_FREQ_TABLE); - if (_intel_wait_for(dev, - (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 10, 1, "915frq")) { - DRM_ERROR("pcode write of freq table timed out\n"); - continue; - } - } - - DRM_UNLOCK(dev); -} - -static void ironlake_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - /* Required for FBC */ - dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | - DPFCRUNIT_CLOCK_GATE_DISABLE | - DPFDUNIT_CLOCK_GATE_DISABLE; - /* Required for CxSR */ - dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_3DCGDIS0, - MARIUNIT_CLOCK_GATE_DISABLE | - SVSMUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(PCH_3DCGDIS1, - VFMUNIT_CLOCK_GATE_DISABLE); - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - /* - * According to the spec the following bits should be set in - * order to enable memory self-refresh - * The bit 22/21 of 0x42004 - * The bit 5 of 0x42020 - * The bit 15 of 0x45000 - */ - I915_WRITE(ILK_DISPLAY_CHICKEN2, - (I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE | ILK_VSDPFD_FULL)); - I915_WRITE(ILK_DSPCLK_GATE, - (I915_READ(ILK_DSPCLK_GATE) | - ILK_DPARB_CLK_GATE)); - I915_WRITE(DISP_ARB_CTL, - (I915_READ(DISP_ARB_CTL) | - DISP_FBC_WM_DIS)); - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - /* - * Based on the document from hardware guys the following bits - * should be set unconditionally in order to enable FBC. - * The bit 22 of 0x42000 - * The bit 22 of 0x42004 - * The bit 7,8,9 of 0x42020. - */ - if (IS_IRONLAKE_M(dev)) { - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | - ILK_FBCQ_DIS); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE); - I915_WRITE(ILK_DSPCLK_GATE, - I915_READ(ILK_DSPCLK_GATE) | - ILK_DPFC_DIS1 | - ILK_DPFC_DIS2 | - ILK_CLK_FBC); - } - - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_ELPIN_409_SELECT); - I915_WRITE(_3D_CHICKEN2, - _3D_CHICKEN2_WM_READ_PIPELINED << 16 | - _3D_CHICKEN2_WM_READ_PIPELINED); -} - -static void gen6_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_ELPIN_409_SELECT); - - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - I915_WRITE(GEN6_UCGCTL1, - I915_READ(GEN6_UCGCTL1) | - GEN6_BLBUNIT_CLOCK_GATE_DISABLE); - - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock - * gating disable must be set. Failure to set it results in - * flickering pixels due to Z write ordering failures after - * some amount of runtime in the Mesa "fire" demo, and Unigine - * Sanctuary and Tropics, and apparently anything else with - * alpha test or pixel discard. 
- * - * According to the spec, bit 11 (RCCUNIT) must also be set, - * but we didn't debug actual testcases to find it out. - */ - I915_WRITE(GEN6_UCGCTL2, - GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | - GEN6_RCCUNIT_CLOCK_GATE_DISABLE); - - /* - * According to the spec the following bits should be - * set in order to enable memory self-refresh and fbc: - * The bit21 and bit22 of 0x42000 - * The bit21 and bit22 of 0x42004 - * The bit5 and bit7 of 0x42020 - * The bit14 of 0x70180 - * The bit14 of 0x71180 - */ - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | - ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE | ILK_VSDPFD_FULL); - I915_WRITE(ILK_DSPCLK_GATE, - I915_READ(ILK_DSPCLK_GATE) | - ILK_DPARB_CLK_GATE | - ILK_DPFD_CLK_GATE); - - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } -} - -static void ivybridge_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. - * This implements the WaDisableRCZUnitClockGating workaround. - */ - I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); - - I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - - I915_WRITE(IVB_CHICKEN3, - CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | - CHICKEN3_DGMG_DONE_FIX_DISABLE); - - /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); - - /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ - I915_WRITE(GEN7_L3CNTLREG1, - GEN7_WA_FOR_GEN7_L3_CONTROL); - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, - GEN7_WA_L3_CHICKEN_MODE); - - /* This is required by WaCatErrorRejectionIssue */ - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | - GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } -} - -static void g4x_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dspclk_gate; - - I915_WRITE(RENCLK_GATE_D1, 0); - I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | - GS_UNIT_CLOCK_GATE_DISABLE | - CL_UNIT_CLOCK_GATE_DISABLE); - I915_WRITE(RAMCLK_GATE_D, 0); - dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | - OVRUNIT_CLOCK_GATE_DISABLE | - OVCUNIT_CLOCK_GATE_DISABLE; - if (IS_GM45(dev)) - dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, dspclk_gate); -} - -static void crestline_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); - I915_WRITE(RENCLK_GATE_D2, 0); - I915_WRITE(DSPCLK_GATE_D, 0); - I915_WRITE(RAMCLK_GATE_D, 0); - I915_WRITE16(DEUC, 0); -} - -static void broadwater_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | - I965_RCC_CLOCK_GATE_DISABLE | - I965_RCPB_CLOCK_GATE_DISABLE | - I965_ISC_CLOCK_GATE_DISABLE | - 
I965_FBC_CLOCK_GATE_DISABLE); - I915_WRITE(RENCLK_GATE_D2, 0); -} - -static void gen3_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dstate = I915_READ(D_STATE); - - dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | - DSTATE_DOT_CLOCK_GATING; - I915_WRITE(D_STATE, dstate); -} - -static void i85x_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); -} - -static void i830_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); -} - -static void ibx_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); -} - -static void cpt_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | - DPLS_EDP_PPS_FIX_DIS); - /* Without this, mode sets may fail silently on FDI */ - for_each_pipe(pipe) - I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); -} - -static void ironlake_teardown_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->renderctx) { - i915_gem_object_unpin(dev_priv->renderctx); - drm_gem_object_unreference(&dev_priv->renderctx->base); - dev_priv->renderctx = NULL; - } - - if (dev_priv->pwrctx) { - i915_gem_object_unpin(dev_priv->pwrctx); - drm_gem_object_unreference(&dev_priv->pwrctx->base); - dev_priv->pwrctx = NULL; - } -} - -static void ironlake_disable_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (I915_READ(PWRCTXA)) { - /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); - (void)_intel_wait_for(dev, - ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), - 50, 1, "915pro"); - - I915_WRITE(PWRCTXA, 0); - POSTING_READ(PWRCTXA); - - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - POSTING_READ(RSTDBYCTL); - } - - ironlake_teardown_rc6(dev); -} - -static int ironlake_setup_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->renderctx == NULL) - dev_priv->renderctx = intel_alloc_context_page(dev); - if (!dev_priv->renderctx) - return -ENOMEM; - - if (dev_priv->pwrctx == NULL) - dev_priv->pwrctx = intel_alloc_context_page(dev); - if (!dev_priv->pwrctx) { - ironlake_teardown_rc6(dev); - return -ENOMEM; - } - - return 0; -} - -void ironlake_enable_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - /* rc6 disabled by default due to repeated reports of hanging during - * boot and resume. 
- */ - if (!intel_enable_rc6(dev)) - return; - - DRM_LOCK(dev); - ret = ironlake_setup_rc6(dev); - if (ret) { - DRM_UNLOCK(dev); - return; - } - - /* - * GPU can automatically power down the render unit if given a page - * to save state. - */ - ret = BEGIN_LP_RING(6); - if (ret) { - ironlake_teardown_rc6(dev); - DRM_UNLOCK(dev); - return; - } - - OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); - OUT_RING(MI_SET_CONTEXT); - OUT_RING(dev_priv->renderctx->gtt_offset | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); - OUT_RING(MI_SUSPEND_FLUSH); - OUT_RING(MI_NOOP); - OUT_RING(MI_FLUSH); - ADVANCE_LP_RING(); - - /* - * Wait for the command parser to advance past MI_SET_CONTEXT. The HW - * does an implicit flush, combined with MI_FLUSH above, it should be - * safe to assume that renderctx is valid - */ - ret = intel_wait_ring_idle(LP_RING(dev_priv)); - if (ret) { - DRM_ERROR("failed to enable ironlake power power savings\n"); - ironlake_teardown_rc6(dev); - DRM_UNLOCK(dev); - return; - } - - I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - DRM_UNLOCK(dev); -} - -void intel_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - dev_priv->display.init_clock_gating(dev); - - if (dev_priv->display.init_pch_clock_gating) - dev_priv->display.init_pch_clock_gating(dev); -} - /* Set up chip specific display functions */ static void intel_init_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* We always want a DPMS function */ if (HAS_PCH_SPLIT(dev)) { dev_priv->display.dpms = ironlake_crtc_dpms; dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; + dev_priv->display.off = ironlake_crtc_off; dev_priv->display.update_plane = ironlake_update_plane; } else { dev_priv->display.dpms = i9xx_crtc_dpms; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; + dev_priv->display.off = i9xx_crtc_off; dev_priv->display.update_plane = i9xx_update_plane; } - if (I915_HAS_FBC(dev)) { - if (HAS_PCH_SPLIT(dev)) { - dev_priv->display.fbc_enabled = ironlake_fbc_enabled; - dev_priv->display.enable_fbc = ironlake_enable_fbc; - dev_priv->display.disable_fbc = ironlake_disable_fbc; - } else if (IS_GM45(dev)) { - dev_priv->display.fbc_enabled = g4x_fbc_enabled; - dev_priv->display.enable_fbc = g4x_enable_fbc; - dev_priv->display.disable_fbc = g4x_disable_fbc; - } else if (IS_CRESTLINE(dev)) { - dev_priv->display.fbc_enabled = i8xx_fbc_enabled; - dev_priv->display.enable_fbc = i8xx_enable_fbc; - dev_priv->display.disable_fbc = i8xx_disable_fbc; - } - /* 855GM needs testing */ - } - /* Returns the core display clock speed */ - if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) + if (IS_VALLEYVIEW(dev)) dev_priv->display.get_display_clock_speed = + valleyview_get_display_clock_speed; + else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) + dev_priv->display.get_display_clock_speed = i945_get_display_clock_speed; else if (IS_I915G(dev)) dev_priv->display.get_display_clock_speed = i915_get_display_clock_speed; else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) dev_priv->display.get_display_clock_speed = i9xx_misc_get_display_clock_speed; else if (IS_I915GM(dev)) dev_priv->display.get_display_clock_speed = i915gm_get_display_clock_speed; else if (IS_I865G(dev)) dev_priv->display.get_display_clock_speed = i865_get_display_clock_speed; else if (IS_I85X(dev)) 
dev_priv->display.get_display_clock_speed = i855_get_display_clock_speed; else /* 852, 830 */ dev_priv->display.get_display_clock_speed = i830_get_display_clock_speed; - /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { - dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; - dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; - - /* IVB configs may use multi-threaded forcewake */ - if (IS_IVYBRIDGE(dev)) { - u32 ecobus; - - /* A small trick here - if the bios hasn't configured MT forcewake, - * and if the device is in RC6, then force_wake_mt_get will not wake - * the device and the ECOBUS read will return zero. Which will be - * (correctly) interpreted by the test below as MT forcewake being - * disabled. - */ - DRM_LOCK(dev); - __gen6_gt_force_wake_mt_get(dev_priv); - ecobus = I915_READ_NOTRACE(ECOBUS); - __gen6_gt_force_wake_mt_put(dev_priv); - DRM_UNLOCK(dev); - - if (ecobus & FORCEWAKE_MT_ENABLE) { - DRM_DEBUG_KMS("Using MT version of forcewake\n"); - dev_priv->display.force_wake_get = - __gen6_gt_force_wake_mt_get; - dev_priv->display.force_wake_put = - __gen6_gt_force_wake_mt_put; - } - } - - if (HAS_PCH_IBX(dev)) - dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; - else if (HAS_PCH_CPT(dev)) - dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; - if (IS_GEN5(dev)) { - if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) - dev_priv->display.update_wm = ironlake_update_wm; - else { - DRM_DEBUG_KMS("Failed to get proper latency. " - "Disable CxSR\n"); - dev_priv->display.update_wm = NULL; - } dev_priv->display.fdi_link_train = ironlake_fdi_link_train; - dev_priv->display.init_clock_gating = ironlake_init_clock_gating; dev_priv->display.write_eld = ironlake_write_eld; } else if (IS_GEN6(dev)) { - if (SNB_READ_WM0_LATENCY()) { - dev_priv->display.update_wm = sandybridge_update_wm; - dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; - } else { - DRM_DEBUG_KMS("Failed to read display plane latency. " - "Disable CxSR\n"); - dev_priv->display.update_wm = NULL; - } dev_priv->display.fdi_link_train = gen6_fdi_link_train; - dev_priv->display.init_clock_gating = gen6_init_clock_gating; dev_priv->display.write_eld = ironlake_write_eld; } else if (IS_IVYBRIDGE(dev)) { /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; - if (SNB_READ_WM0_LATENCY()) { - dev_priv->display.update_wm = sandybridge_update_wm; - dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; - } else { - DRM_DEBUG_KMS("Failed to read display plane latency. " - "Disable CxSR\n"); - dev_priv->display.update_wm = NULL; - } - dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; dev_priv->display.write_eld = ironlake_write_eld; + } else if (IS_HASWELL(dev)) { + dev_priv->display.fdi_link_train = hsw_fdi_link_train; + dev_priv->display.write_eld = ironlake_write_eld; } else dev_priv->display.update_wm = NULL; - } else if (IS_PINEVIEW(dev)) { - if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), - dev_priv->is_ddr3, - dev_priv->fsb_freq, - dev_priv->mem_freq)) { - DRM_INFO("failed to find known CxSR latency " - "(found ddr%s fsb freq %d, mem freq %d), " - "disabling CxSR\n", - (dev_priv->is_ddr3 == 1) ? 
"3" : "2", - dev_priv->fsb_freq, dev_priv->mem_freq); - /* Disable CxSR and never update its watermark again */ - pineview_disable_cxsr(dev); - dev_priv->display.update_wm = NULL; - } else - dev_priv->display.update_wm = pineview_update_wm; - dev_priv->display.init_clock_gating = gen3_init_clock_gating; + } else if (IS_VALLEYVIEW(dev)) { + dev_priv->display.force_wake_get = vlv_force_wake_get; + dev_priv->display.force_wake_put = vlv_force_wake_put; } else if (IS_G4X(dev)) { dev_priv->display.write_eld = g4x_write_eld; - dev_priv->display.update_wm = g4x_update_wm; - dev_priv->display.init_clock_gating = g4x_init_clock_gating; - } else if (IS_GEN4(dev)) { - dev_priv->display.update_wm = i965_update_wm; - if (IS_CRESTLINE(dev)) - dev_priv->display.init_clock_gating = crestline_init_clock_gating; - else if (IS_BROADWATER(dev)) - dev_priv->display.init_clock_gating = broadwater_init_clock_gating; - } else if (IS_GEN3(dev)) { - dev_priv->display.update_wm = i9xx_update_wm; - dev_priv->display.get_fifo_size = i9xx_get_fifo_size; - dev_priv->display.init_clock_gating = gen3_init_clock_gating; - } else if (IS_I865G(dev)) { - dev_priv->display.update_wm = i830_update_wm; - dev_priv->display.init_clock_gating = i85x_init_clock_gating; - dev_priv->display.get_fifo_size = i830_get_fifo_size; - } else if (IS_I85X(dev)) { - dev_priv->display.update_wm = i9xx_update_wm; - dev_priv->display.get_fifo_size = i85x_get_fifo_size; - dev_priv->display.init_clock_gating = i85x_init_clock_gating; - } else { - dev_priv->display.update_wm = i830_update_wm; - dev_priv->display.init_clock_gating = i830_init_clock_gating; - if (IS_845G(dev)) - dev_priv->display.get_fifo_size = i845_get_fifo_size; - else - dev_priv->display.get_fifo_size = i830_get_fifo_size; } /* Default just returns -ENODEV to indicate unsupported */ dev_priv->display.queue_flip = intel_default_queue_flip; switch (INTEL_INFO(dev)->gen) { case 2: dev_priv->display.queue_flip = intel_gen2_queue_flip; break; case 3: dev_priv->display.queue_flip = intel_gen3_queue_flip; break; case 4: case 5: dev_priv->display.queue_flip = intel_gen4_queue_flip; break; case 6: dev_priv->display.queue_flip = intel_gen6_queue_flip; break; case 7: dev_priv->display.queue_flip = intel_gen7_queue_flip; break; } } /* * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, * resume, or other times. This quirk makes sure that's the case for * affected systems. */ static void quirk_pipea_force(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->quirks |= QUIRK_PIPEA_FORCE; - DRM_DEBUG("applying pipe a force quirk\n"); + DRM_INFO("applying pipe a force quirk\n"); } /* * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason */ static void quirk_ssc_force_disable(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; + DRM_INFO("applying lvds SSC disable quirk\n"); } +/* + * A machine (e.g. 
Acer Aspire 5734Z) may need to invert the panel backlight + * brightness value + */ +static void quirk_invert_brightness(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; + DRM_INFO("applying inverted panel brightness quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; int subsystem_device; void (*hook)(struct drm_device *dev); }; #define PCI_ANY_ID (~0u) -struct intel_quirk intel_quirks[] = { +static struct intel_quirk intel_quirks[] = { /* HP Mini needs pipe A force quirk (LP: #322104) */ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, /* Thinkpad R31 needs pipe A force quirk */ { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, /* ThinkPad X40 needs pipe A force quirk */ /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, /* 855 & before need to leave pipe A & dpll A up */ { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, /* Lenovo U160 cannot use SSC on LVDS */ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, /* Sony Vaio Y cannot use SSC on LVDS */ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, + + /* Acer Aspire 5734Z must invert backlight brightness */ + { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, }; static void intel_init_quirks(struct drm_device *dev) { struct intel_quirk *q; device_t d; int i; d = dev->device; for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) { q = &intel_quirks[i]; if (pci_get_device(d) == q->device && (pci_get_subvendor(d) == q->subsystem_vendor || q->subsystem_vendor == PCI_ANY_ID) && (pci_get_subdevice(d) == q->subsystem_device || q->subsystem_device == PCI_ANY_ID)) q->hook(dev); } } /* Disable the VGA plane that we never use */ static void i915_disable_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u8 sr1; u32 vga_reg; if (HAS_PCH_SPLIT(dev)) vga_reg = CPU_VGACNTRL; else vga_reg = VGACNTRL; #if 0 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); #endif - outb(VGA_SR_INDEX, 1); + outb(VGA_SR_INDEX, SR01); sr1 = inb(VGA_SR_DATA); outb(VGA_SR_DATA, sr1 | 1 << 5); #if 0 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); #endif DELAY(300); I915_WRITE(vga_reg, VGA_DISP_DISABLE); POSTING_READ(vga_reg); } +static void ivb_pch_pwm_override(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * IVB has CPU eDP backlight regs too, set things up to let the + * PCH regs control the backlight + */ + I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE); + I915_WRITE(BLC_PWM_CPU_CTL, 0); + I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30)); +} + +void intel_modeset_init_hw(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + intel_init_clock_gating(dev); + + if (IS_IRONLAKE_M(dev)) { + ironlake_enable_drps(dev); + ironlake_enable_rc6(dev); + intel_init_emon(dev); + } + + if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { + gen6_enable_rps(dev_priv); + gen6_update_ring_freq(dev_priv); + } + + if (IS_IVYBRIDGE(dev)) + ivb_pch_pwm_override(dev); +} + void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i, ret; drm_mode_config_init(dev); dev->mode_config.min_width = 0; 
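(Aside: the intel_init_quirks() loop above matches the probing device's PCI IDs against the quirk table, treating PCI_ANY_ID as a wildcard for the subsystem vendor/device fields. A minimal standalone sketch of that match follows; the helper names are hypothetical and the IDs are taken from the table above purely for illustration. It is not part of this change.)

/*
 * Standalone sketch of the wildcard match performed by intel_init_quirks():
 * PCI_ANY_ID (~0u) in a table entry matches any subsystem vendor or device.
 */
#include <stdio.h>

#define PCI_ANY_ID (~0u)

struct quirk {
	unsigned device, subvendor, subdevice;
	const char *name;
};

static const struct quirk quirks[] = {
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, "pipe A force" },
	{ 0x0046, 0x17aa, 0x3920, "LVDS SSC disable" },
};

static int quirk_matches(const struct quirk *q, unsigned dev,
    unsigned subven, unsigned subdev)
{
	return dev == q->device &&
	    (subven == q->subvendor || q->subvendor == PCI_ANY_ID) &&
	    (subdev == q->subdevice || q->subdevice == PCI_ANY_ID);
}

int main(void)
{
	unsigned i;

	/* A 0x3582 device matches the first entry regardless of subsystem IDs. */
	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirk_matches(&quirks[i], 0x3582, 0x1234, 0x5678))
			printf("applying quirk: %s\n", quirks[i].name);
	return 0;
}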
dev->mode_config.min_height = 0; dev->mode_config.preferred_depth = 24; dev->mode_config.prefer_shadow = 1; - dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *, - &intel_mode_funcs); + dev->mode_config.funcs = &intel_mode_funcs; intel_init_quirks(dev); + intel_init_pm(dev); + + intel_prepare_ddi(dev); + intel_init_display(dev); if (IS_GEN2(dev)) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; } else if (IS_GEN3(dev)) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; } dev->mode_config.fb_base = dev->agp->base; DRM_DEBUG_KMS("%d display pipe%s available.\n", dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); for (i = 0; i < dev_priv->num_pipe; i++) { intel_crtc_init(dev, i); ret = intel_plane_init(dev, i); if (ret) DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); } + intel_pch_pll_init(dev); + /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); - intel_init_clock_gating(dev); - - if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); - intel_init_emon(dev); - } - - if (IS_GEN6(dev)) { - gen6_enable_rps(dev_priv); - gen6_update_ring_freq(dev_priv); - } - TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv); callout_init(&dev_priv->idle_callout, CALLOUT_MPSAFE); } void intel_modeset_gem_init(struct drm_device *dev) { - if (IS_IRONLAKE_M(dev)) - ironlake_enable_rc6(dev); + intel_modeset_init_hw(dev); intel_setup_overlay(dev); } void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; drm_kms_helper_poll_fini(dev); DRM_LOCK(dev); #if 0 intel_unregister_dsm_handler(); #endif list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) continue; intel_crtc = to_intel_crtc(crtc); intel_increase_pllclock(crtc); } intel_disable_fbc(dev); if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); - if (IS_GEN6(dev)) + if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) gen6_disable_rps(dev); if (IS_IRONLAKE_M(dev)) ironlake_disable_rc6(dev); + + if (IS_VALLEYVIEW(dev)) + vlv_init_dpio(dev); /* Disable the irq before mode object teardown, for the irq might * enqueue unpin/hotplug work. */ drm_irq_uninstall(dev); DRM_UNLOCK(dev); if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL)) taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task); if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL)) taskqueue_drain(dev_priv->tq, &dev_priv->rps_task); /* Shut off idle work before the crtcs get freed. */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { intel_crtc = to_intel_crtc(crtc); callout_drain(&intel_crtc->idle_callout); } callout_drain(&dev_priv->idle_callout); if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL)) taskqueue_drain(dev_priv->tq, &dev_priv->idle_task); drm_mode_config_cleanup(dev); } /* * Return which encoder is currently attached for connector. 
*/ struct drm_encoder *intel_best_encoder(struct drm_connector *connector) { return &intel_attached_encoder(connector)->base; } void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder) { connector->encoder = encoder; drm_mode_connector_attach_encoder(&connector->base, &encoder->base); } /* * set vga decode state - true == enable VGA decode */ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) { struct drm_i915_private *dev_priv; device_t bridge_dev; u16 gmch_ctrl; dev_priv = dev->dev_private; bridge_dev = intel_gtt_get_bridge_device(); gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2); if (state) gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; else gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2); return (0); } struct intel_display_error_state { struct intel_cursor_error_state { u32 control; u32 position; u32 base; u32 size; } cursor[2]; struct intel_pipe_error_state { u32 conf; u32 source; u32 htotal; u32 hblank; u32 hsync; u32 vtotal; u32 vblank; u32 vsync; } pipe[2]; struct intel_plane_error_state { u32 control; u32 stride; u32 size; u32 pos; u32 addr; u32 surface; u32 tile_offset; } plane[2]; }; struct intel_display_error_state * intel_display_capture_error_state(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_display_error_state *error; int i; error = malloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT); if (error == NULL) return NULL; for (i = 0; i < 2; i++) { error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); error->cursor[i].base = I915_READ(CURBASE(i)); error->plane[i].control = I915_READ(DSPCNTR(i)); error->plane[i].stride = I915_READ(DSPSTRIDE(i)); error->plane[i].size = I915_READ(DSPSIZE(i)); error->plane[i].pos = I915_READ(DSPPOS(i)); error->plane[i].addr = I915_READ(DSPADDR(i)); if (INTEL_INFO(dev)->gen >= 4) { error->plane[i].surface = I915_READ(DSPSURF(i)); error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } error->pipe[i].conf = I915_READ(PIPECONF(i)); error->pipe[i].source = I915_READ(PIPESRC(i)); error->pipe[i].htotal = I915_READ(HTOTAL(i)); error->pipe[i].hblank = I915_READ(HBLANK(i)); error->pipe[i].hsync = I915_READ(HSYNC(i)); error->pipe[i].vtotal = I915_READ(VTOTAL(i)); error->pipe[i].vblank = I915_READ(VBLANK(i)); error->pipe[i].vsync = I915_READ(VSYNC(i)); } return error; } void intel_display_print_error_state(struct sbuf *m, struct drm_device *dev, struct intel_display_error_state *error) { int i; for (i = 0; i < 2; i++) { sbuf_printf(m, "Pipe [%d]:\n", i); sbuf_printf(m, " CONF: %08x\n", error->pipe[i].conf); sbuf_printf(m, " SRC: %08x\n", error->pipe[i].source); sbuf_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); sbuf_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); sbuf_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); sbuf_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); sbuf_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); sbuf_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); sbuf_printf(m, "Plane [%d]:\n", i); sbuf_printf(m, " CNTR: %08x\n", error->plane[i].control); sbuf_printf(m, " STRIDE: %08x\n", error->plane[i].stride); sbuf_printf(m, " SIZE: %08x\n", error->plane[i].size); sbuf_printf(m, " POS: %08x\n", error->plane[i].pos); sbuf_printf(m, " ADDR: %08x\n", error->plane[i].addr); if (INTEL_INFO(dev)->gen >= 4) { sbuf_printf(m, " SURF: %08x\n", error->plane[i].surface); sbuf_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); } 
sbuf_printf(m, "Cursor [%d]:\n", i); sbuf_printf(m, " CNTR: %08x\n", error->cursor[i].control); sbuf_printf(m, " POS: %08x\n", error->cursor[i].position); sbuf_printf(m, " BASE: %08x\n", error->cursor[i].base); } } Index: head/sys/dev/drm2/i915/intel_dp.c =================================================================== --- head/sys/dev/drm2/i915/intel_dp.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_dp.c (revision 277487) @@ -1,2554 +1,2593 @@ /* * Copyright © 2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Keith Packard * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #define DP_RECEIVER_CAP_SIZE 0xf #define DP_LINK_STATUS_SIZE 6 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) #define DP_LINK_CONFIGURATION_SIZE 9 struct intel_dp { struct intel_encoder base; uint32_t output_reg; uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; enum hdmi_force_audio force_audio; uint32_t color_range; int dpms_mode; uint8_t link_bw; uint8_t lane_count; uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; device_t dp_iic_bus; device_t adapter; bool is_pch_edp; uint8_t train_set[4]; int panel_power_up_delay; int panel_power_down_delay; int panel_power_cycle_delay; int backlight_on_delay; int backlight_off_delay; struct drm_display_mode *panel_fixed_mode; /* for eDP */ struct timeout_task panel_vdd_task; bool want_panel_vdd; }; /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) * @intel_dp: DP struct * * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. */ static bool is_edp(struct intel_dp *intel_dp) { return intel_dp->base.type == INTEL_OUTPUT_EDP; } /** * is_pch_edp - is the port on the PCH and attached to an eDP panel? * @intel_dp: DP struct * * Returns true if the given DP struct corresponds to a PCH DP port attached * to an eDP panel, false otherwise. Helpful for determining whether we * may need FDI resources for a given DP output or not. */ static bool is_pch_edp(struct intel_dp *intel_dp) { return intel_dp->is_pch_edp; } /** * is_cpu_edp - is the port on the CPU and attached to an eDP panel? * @intel_dp: DP struct * * Returns true if the given DP struct corresponds to a CPU eDP port. 
*/ static bool is_cpu_edp(struct intel_dp *intel_dp) { return is_edp(intel_dp) && !is_pch_edp(intel_dp); } static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { return container_of(encoder, struct intel_dp, base.base); } static struct intel_dp *intel_attached_dp(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_dp, base); } /** * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? * @encoder: DRM encoder * * Return true if @encoder corresponds to a PCH attached eDP panel. Needed * by intel_display.c. */ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) { struct intel_dp *intel_dp; if (!encoder) return false; intel_dp = enc_to_intel_dp(encoder); return is_pch_edp(intel_dp); } static void intel_dp_start_link_train(struct intel_dp *intel_dp); static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); void intel_edp_link_config(struct intel_encoder *intel_encoder, int *lane_num, int *link_bw) { struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); *lane_num = intel_dp->lane_count; if (intel_dp->link_bw == DP_LINK_BW_1_62) *link_bw = 162000; else if (intel_dp->link_bw == DP_LINK_BW_2_7) *link_bw = 270000; } static int intel_dp_max_lane_count(struct intel_dp *intel_dp) { int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; switch (max_lane_count) { case 1: case 2: case 4: break; default: max_lane_count = 4; } return max_lane_count; } static int intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; switch (max_link_bw) { case DP_LINK_BW_1_62: case DP_LINK_BW_2_7: break; default: max_link_bw = DP_LINK_BW_1_62; break; } return max_link_bw; } static int intel_dp_link_clock(uint8_t link_bw) { if (link_bw == DP_LINK_BW_2_7) return 270000; else return 162000; } /* * The units on the numbers in the next two are... bizarre. Examples will * make it clearer; this one parallels an example in the eDP spec. * * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: * * 270000 * 1 * 8 / 10 == 216000 * * The actual data capacity of that configuration is 2.16Gbit/s, so the * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - * or equivalently, kilopixels per second - so for 1680x1050R it'd be * 119000. At 18bpp that's 2142000 kilobits per second. * * Thus the strange-looking division by 10 in intel_dp_link_required, to * get the result in decakilobits instead of kilobits. 
*/ static int intel_dp_link_required(int pixel_clock, int bpp) { return (pixel_clock * bpp + 9) / 10; } static int intel_dp_max_data_rate(int max_link_clock, int max_lanes) { return (max_link_clock * max_lanes * 8) / 10; } static bool intel_dp_adjust_dithering(struct intel_dp *intel_dp, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); int max_lanes = intel_dp_max_lane_count(intel_dp); int max_rate, mode_rate; mode_rate = intel_dp_link_required(mode->clock, 24); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); if (mode_rate > max_rate) { mode_rate = intel_dp_link_required(mode->clock, 18); if (mode_rate > max_rate) return false; if (adjusted_mode) adjusted_mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC; return true; } return true; } static int intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_dp *intel_dp = intel_attached_dp(connector); if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) return MODE_PANEL; } if (!intel_dp_adjust_dithering(intel_dp, mode, NULL)) return MODE_CLOCK_HIGH; if (mode->clock < 10000) return MODE_CLOCK_LOW; return MODE_OK; } static uint32_t pack_aux(uint8_t *src, int src_bytes) { int i; uint32_t v = 0; if (src_bytes > 4) src_bytes = 4; for (i = 0; i < src_bytes; i++) v |= ((uint32_t) src[i]) << ((3-i) * 8); return v; } static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) { int i; if (dst_bytes > 4) dst_bytes = 4; for (i = 0; i < dst_bytes; i++) dst[i] = src >> ((3-i) * 8); } /* hrawclock is 1/4 the FSB frequency */ static int intel_hrawclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t clkcfg; clkcfg = I915_READ(CLKCFG); switch (clkcfg & CLKCFG_FSB_MASK) { case CLKCFG_FSB_400: return 100; case CLKCFG_FSB_533: return 133; case CLKCFG_FSB_667: return 166; case CLKCFG_FSB_800: return 200; case CLKCFG_FSB_1067: return 266; case CLKCFG_FSB_1333: return 333; /* these two are just a guess; one of them might be right */ case CLKCFG_FSB_1600: case CLKCFG_FSB_1600_ALT: return 400; default: return 133; } } static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; } static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; } static void intel_dp_check_edp(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; if (!is_edp(intel_dp)) return; if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { printf("eDP powered off while attempting aux channel communication.\n"); DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); } } static int intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *send, int send_bytes, uint8_t *recv, int recv_size) { uint32_t output_reg = intel_dp->output_reg; struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_data 
= ch_ctl + 4; int i; int recv_bytes; uint32_t status; uint32_t aux_clock_divider; int try, precharge = 5; intel_dp_check_edp(intel_dp); /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the * hrawclk value and divide by 2 and use that * * Note that PCH attached eDP panels should use a 125MHz input * clock divider. */ if (is_cpu_edp(intel_dp)) { if (IS_GEN6(dev) || IS_GEN7(dev)) aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ else aux_clock_divider = 225; /* eDP input clock at 450Mhz */ } else if (HAS_PCH_SPLIT(dev)) aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ else aux_clock_divider = intel_hrawclk(dev) / 2; /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { status = I915_READ(ch_ctl); if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) break; drm_msleep(1, "915ach"); } if (try == 3) { printf("dp_aux_ch not started status 0x%08x\n", I915_READ(ch_ctl)); return -EBUSY; } /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) I915_WRITE(ch_data + i, pack_aux(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ I915_WRITE(ch_ctl, DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_TIME_OUT_400us | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR); for (;;) { status = I915_READ(ch_ctl); if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) break; DELAY(100); } /* Clear done status and any errors */ I915_WRITE(ch_ctl, status | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR); if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR)) continue; if (status & DP_AUX_CH_CTL_DONE) break; } if ((status & DP_AUX_CH_CTL_DONE) == 0) { DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); return -EBUSY; } /* Check for timeout or receive error. 
* Timeouts occur when the sink is not connected */ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); return -EIO; } /* Timeouts occur when the device isn't connected, so they're * "normal" -- don't fill the kernel log with these */ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); return -ETIMEDOUT; } /* Unload any bytes sent back from the other side */ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); if (recv_bytes > recv_size) recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) unpack_aux(I915_READ(ch_data + i), recv + i, recv_bytes - i); return recv_bytes; } /* Write data to the aux channel in native mode */ static int intel_dp_aux_native_write(struct intel_dp *intel_dp, uint16_t address, uint8_t *send, int send_bytes) { int ret; uint8_t msg[20]; int msg_bytes; uint8_t ack; intel_dp_check_edp(intel_dp); if (send_bytes > 16) return -1; msg[0] = AUX_NATIVE_WRITE << 4; msg[1] = address >> 8; msg[2] = address & 0xff; msg[3] = send_bytes - 1; memcpy(&msg[4], send, send_bytes); msg_bytes = send_bytes + 4; for (;;) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) break; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) DELAY(100); else return -EIO; } return send_bytes; } /* Write a single byte to the aux channel in native mode */ static int intel_dp_aux_native_write_1(struct intel_dp *intel_dp, uint16_t address, uint8_t byte) { return intel_dp_aux_native_write(intel_dp, address, &byte, 1); } /* read bytes from a native aux channel */ static int intel_dp_aux_native_read(struct intel_dp *intel_dp, uint16_t address, uint8_t *recv, int recv_bytes) { uint8_t msg[4]; int msg_bytes; uint8_t reply[20]; int reply_bytes; uint8_t ack; int ret; intel_dp_check_edp(intel_dp); msg[0] = AUX_NATIVE_READ << 4; msg[1] = address >> 8; msg[2] = address & 0xff; msg[3] = recv_bytes - 1; msg_bytes = 4; reply_bytes = recv_bytes + 1; for (;;) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret == 0) return -EPROTO; if (ret < 0) return ret; ack = reply[0]; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { memcpy(recv, reply + 1, ret - 1); return ret - 1; } else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) DELAY(100); else return -EIO; } } static int intel_dp_i2c_aux_ch(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte) { struct iic_dp_aux_data *data; struct intel_dp *intel_dp; uint16_t address; uint8_t msg[5]; uint8_t reply[2]; unsigned retry; int msg_bytes; int reply_bytes; int ret; data = device_get_softc(idev); intel_dp = data->priv; address = data->address; intel_dp_check_edp(intel_dp); /* Set up the command byte */ if (mode & MODE_I2C_READ) msg[0] = AUX_I2C_READ << 4; else msg[0] = AUX_I2C_WRITE << 4; if (!(mode & MODE_I2C_STOP)) msg[0] |= AUX_I2C_MOT << 4; msg[1] = address >> 8; msg[2] = address; switch (mode) { case MODE_I2C_WRITE: msg[3] = 0; msg[4] = write_byte; msg_bytes = 5; reply_bytes = 1; break; case MODE_I2C_READ: msg[3] = 0; msg_bytes = 4; reply_bytes = 2; break; default: msg_bytes = 3; reply_bytes = 1; break; } for (retry = 0; retry < 5; retry++) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret < 0) { DRM_DEBUG_KMS("aux_ch failed %d\n", ret); return (-ret); } switch (reply[0] & AUX_NATIVE_REPLY_MASK) { case AUX_NATIVE_REPLY_ACK: /* 
I2C-over-AUX Reply field is only valid * when paired with AUX ACK. */ break; case AUX_NATIVE_REPLY_NACK: DRM_DEBUG_KMS("aux_ch native nack\n"); return (EREMOTEIO); case AUX_NATIVE_REPLY_DEFER: DELAY(100); continue; default: DRM_ERROR("aux_ch invalid native reply 0x%02x\n", reply[0]); return (EREMOTEIO); } switch (reply[0] & AUX_I2C_REPLY_MASK) { case AUX_I2C_REPLY_ACK: if (mode == MODE_I2C_READ) { *read_byte = reply[1]; } return (0/*reply_bytes - 1*/); case AUX_I2C_REPLY_NACK: DRM_DEBUG_KMS("aux_i2c nack\n"); return (EREMOTEIO); case AUX_I2C_REPLY_DEFER: DRM_DEBUG_KMS("aux_i2c defer\n"); DELAY(100); break; default: DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); return (EREMOTEIO); } } DRM_ERROR("too many retries, giving up\n"); return (EREMOTEIO); } static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); static int intel_dp_i2c_init(struct intel_dp *intel_dp, struct intel_connector *intel_connector, const char *name) { int ret; DRM_DEBUG_KMS("i2c_init %s\n", name); ironlake_edp_panel_vdd_on(intel_dp); ret = iic_dp_aux_add_bus(intel_connector->base.dev->device, name, intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus, &intel_dp->adapter); ironlake_edp_panel_vdd_off(intel_dp, false); return (ret); } static bool -intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, +intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int lane_count, clock; int max_lane_count = intel_dp_max_lane_count(intel_dp); int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; - int bpp; + int bpp, mode_rate; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); + /* + * the mode->clock is used to calculate the Data&Link M/N + * of the pipe. For the eDP the fixed clock should be used. + */ + mode->clock = intel_dp->panel_fixed_mode->clock; } + DRM_DEBUG_KMS("DP link computation with max lane count %i " + "max bw %02x pixel clock %iKHz\n", + max_lane_count, bws[max_clock], mode->clock); + if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode)) return false; bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 
18 : 24; + mode_rate = intel_dp_link_required(mode->clock, bpp); for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); - if (intel_dp_link_required(adjusted_mode->clock, bpp) - <= link_avail) { + if (mode_rate <= link_avail) { intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); - DRM_DEBUG_KMS("Display port link bw %02x lane " - "count %d clock %d\n", + DRM_DEBUG_KMS("DP link bw %02x lane " + "count %d clock %d bpp %d\n", intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock); + adjusted_mode->clock, bpp); + DRM_DEBUG_KMS("DP link bw required %i available %i\n", + mode_rate, link_avail); return true; } } } return false; } struct intel_dp_m_n { uint32_t tu; uint32_t gmch_m; uint32_t gmch_n; uint32_t link_m; uint32_t link_n; }; static void intel_reduce_ratio(uint32_t *num, uint32_t *den) { while (*num > 0xffffff || *den > 0xffffff) { *num >>= 1; *den >>= 1; } } static void intel_dp_compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock, struct intel_dp_m_n *m_n) { m_n->tu = 64; m_n->gmch_m = (pixel_clock * bpp) >> 3; m_n->gmch_n = link_clock * nlanes; intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); m_n->link_m = pixel_clock; m_n->link_n = link_clock; intel_reduce_ratio(&m_n->link_m, &m_n->link_n); } void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_encoder *encoder; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int lane_count = 4; struct intel_dp_m_n m_n; int pipe = intel_crtc->pipe; /* * Find the lane count in the intel_encoder private */ list_for_each_entry(encoder, &mode_config->encoder_list, head) { struct intel_dp *intel_dp; if (encoder->crtc != crtc) continue; intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || intel_dp->base.type == INTEL_OUTPUT_EDP) { lane_count = intel_dp->lane_count; break; } } /* * Compute the GMCH and Link ratios. The '3' here is * the number of bytes_per_pixel post-LUT, which we always * set up for 8-bits of R/G/B, or 3 bytes total. 
*/ intel_dp_compute_m_n(intel_crtc->bpp, lane_count, mode->clock, adjusted_mode->clock, &m_n); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(TRANSDATA_M1(pipe), ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | m_n.gmch_m); I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); } else { I915_WRITE(PIPE_GMCH_DATA_M(pipe), ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | m_n.gmch_m); I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); } } static void ironlake_edp_pll_on(struct drm_encoder *encoder); static void ironlake_edp_pll_off(struct drm_encoder *encoder); static void intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); /* Turn on the eDP PLL if needed */ if (is_edp(intel_dp)) { if (!is_pch_edp(intel_dp)) ironlake_edp_pll_on(encoder); else ironlake_edp_pll_off(encoder); } /* * There are four kinds of DP registers: * * IBX PCH * SNB CPU * IVB CPU * CPT PCH * * IBX PCH and CPU are the same for almost everything, * except that the CPU DP PLL is configured in this * register * * CPT PCH is quite different, having many bits moved * to the TRANS_DP_CTL register instead. That * configuration happens (oddly) in ironlake_pch_enable */ /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. */ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; /* Handle DP bits in common between all three register formats */ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; switch (intel_dp->lane_count) { case 1: intel_dp->DP |= DP_PORT_WIDTH_1; break; case 2: intel_dp->DP |= DP_PORT_WIDTH_2; break; case 4: intel_dp->DP |= DP_PORT_WIDTH_4; break; } if (intel_dp->has_audio) { DRM_DEBUG_KMS("Enabling DP audio on pipe %c\n", pipe_name(intel_crtc->pipe)); intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_write_eld(encoder, adjusted_mode); } memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); intel_dp->link_configuration[0] = intel_dp->link_bw; intel_dp->link_configuration[1] = intel_dp->lane_count; /* * Check for DPCD version > 1.1 and enhanced framing support */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; } /* Split out the IBX/CPU vs CPT settings */ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) intel_dp->DP |= DP_SYNC_VS_HIGH; intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) intel_dp->DP |= DP_ENHANCED_FRAMING; intel_dp->DP |= intel_crtc->pipe << 29; /* don't miss out required setting for eDP */ intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) intel_dp->DP |= DP_PLL_FREQ_160MHZ; else intel_dp->DP |= DP_PLL_FREQ_270MHZ; } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { intel_dp->DP |= intel_dp->color_range; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; if 
(adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) intel_dp->DP |= DP_SYNC_VS_HIGH; intel_dp->DP |= DP_LINK_TRAIN_OFF; if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) intel_dp->DP |= DP_ENHANCED_FRAMING; if (intel_crtc->pipe == 1) intel_dp->DP |= DP_PIPEB_SELECT; if (is_cpu_edp(intel_dp)) { /* don't miss out required setting for eDP */ intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) intel_dp->DP |= DP_PLL_FREQ_160MHZ; else intel_dp->DP |= DP_PLL_FREQ_270MHZ; } } else { intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; } } #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) #define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) #define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) #define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) #define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) #define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) static void ironlake_wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", mask, value, I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); if (_intel_wait_for(dev, (I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10, "915iwp")) { DRM_ERROR("Panel status timeout: status %08x control %08x\n", I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); } } static void ironlake_wait_panel_on(struct intel_dp *intel_dp) { DRM_DEBUG_KMS("Wait for panel power on\n"); ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); } static void ironlake_wait_panel_off(struct intel_dp *intel_dp) { DRM_DEBUG_KMS("Wait for panel power off time\n"); ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); } static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) { DRM_DEBUG_KMS("Wait for panel power cycle\n"); ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); } /* Read the current pp_control value, unlocking the register if it * is locked */ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) { u32 control = I915_READ(PCH_PP_CONTROL); control &= ~PANEL_UNLOCK_MASK; control |= PANEL_UNLOCK_REGS; return control; } static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (!is_edp(intel_dp)) return; DRM_DEBUG_KMS("Turn eDP VDD on\n"); if (intel_dp->want_panel_vdd) printf("eDP VDD already requested on\n"); intel_dp->want_panel_vdd = true; if (ironlake_edp_have_panel_vdd(intel_dp)) { DRM_DEBUG_KMS("eDP VDD already on\n"); return; } if (!ironlake_edp_have_panel_power(intel_dp)) ironlake_wait_panel_power_cycle(intel_dp); pp = ironlake_get_pp_control(dev_priv); pp |= EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); /* * If the panel wasn't on, delay before accessing aux channel */ if (!ironlake_edp_have_panel_power(intel_dp)) { DRM_DEBUG_KMS("eDP was not running\n"); drm_msleep(intel_dp->panel_power_up_delay, "915edpon"); } } static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; 
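(Aside: the bandwidth test in intel_dp_mode_fixup() above compares intel_dp_link_required(clock, bpp) against intel_dp_max_data_rate(link_clock, lanes), both expressed in the "decakilobit" units described in the earlier comment. The standalone arithmetic sketch below reproduces that comparison for an illustrative 1920x1200 mode; the numbers are examples, not part of this change.)

/*
 * Mirror of intel_dp_mode_fixup()'s bandwidth check, in the same
 * decakilobit units (kbit/s divided by 10).
 */
#include <stdio.h>

static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 9) / 10;	/* round up */
}

static int max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;	/* 8b/10b coding */
}

int main(void)
{
	int mode_clock = 154000;	/* e.g. 1920x1200@60, in kHz (illustrative) */

	/* 2.7 GHz link, 4 lanes: 270000 * 4 * 8 / 10 = 864000 */
	int avail = max_data_rate(270000, 4);
	/* 24 bpp: 154000 * 24 / 10 = 369600 -> fits within 864000 */
	int need = link_required(mode_clock, 24);

	printf("need %d, avail %d -> %s\n", need, avail,
	    need <= avail ? "OK" : "too fast");
	return 0;
}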
struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { pp = ironlake_get_pp_control(dev_priv); pp &= ~EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); /* Make sure sequencer is idle before allowing subsequent activity */ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); drm_msleep(intel_dp->panel_power_down_delay, "915vddo"); } } static void ironlake_panel_vdd_work(void *arg, int pending __unused) { struct intel_dp *intel_dp = arg; struct drm_device *dev = intel_dp->base.base.dev; sx_xlock(&dev->mode_config.mutex); ironlake_panel_vdd_off_sync(intel_dp); sx_xunlock(&dev->mode_config.mutex); } static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { if (!is_edp(intel_dp)) return; DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); if (!intel_dp->want_panel_vdd) printf("eDP VDD not forced on\n"); intel_dp->want_panel_vdd = false; if (sync) { ironlake_panel_vdd_off_sync(intel_dp); } else { /* * Queue the timer to fire a long * time from now (relative to the power down delay) * to keep the panel power up across a sequence of operations */ struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private; taskqueue_enqueue_timeout(dev_priv->tq, &intel_dp->panel_vdd_task, msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); } } static void ironlake_edp_panel_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (!is_edp(intel_dp)) return; DRM_DEBUG_KMS("Turn eDP power on\n"); if (ironlake_edp_have_panel_power(intel_dp)) { DRM_DEBUG_KMS("eDP power already on\n"); return; } ironlake_wait_panel_power_cycle(intel_dp); pp = ironlake_get_pp_control(dev_priv); if (IS_GEN5(dev)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); } pp |= POWER_TARGET_ON; if (!IS_GEN5(dev)) pp |= PANEL_POWER_RESET; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); ironlake_wait_panel_on(intel_dp); if (IS_GEN5(dev)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); } } static void ironlake_edp_panel_off(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (!is_edp(intel_dp)) return; DRM_DEBUG_KMS("Turn eDP power off\n"); if (intel_dp->want_panel_vdd) printf("Cannot turn power off while VDD is on\n"); + ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */ pp = ironlake_get_pp_control(dev_priv); pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); ironlake_wait_panel_off(intel_dp); } static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (!is_edp(intel_dp)) return; DRM_DEBUG_KMS("\n"); /* * If we enable the backlight right away following a panel power * on, we may see slight flicker as the panel syncs with the eDP * link. So delay a bit to make sure the image is solid before * allowing it to appear. 
*/ drm_msleep(intel_dp->backlight_on_delay, "915ebo"); pp = ironlake_get_pp_control(dev_priv); pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); } static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (!is_edp(intel_dp)) return; DRM_DEBUG_KMS("\n"); pp = ironlake_get_pp_control(dev_priv); pp &= ~EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); drm_msleep(intel_dp->backlight_off_delay, "915bo1"); } static void ironlake_edp_pll_on(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; DRM_DEBUG_KMS("\n"); dpa_ctl = I915_READ(DP_A); dpa_ctl |= DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); DELAY(200); } static void ironlake_edp_pll_off(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; dpa_ctl = I915_READ(DP_A); dpa_ctl &= ~DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); DELAY(200); } /* If the sink supports it, try to set the power state appropriately */ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) { int ret, i; /* Should have a valid DPCD by this point */ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) return; if (mode != DRM_MODE_DPMS_ON) { ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, DP_SET_POWER_D3); if (ret != 1) DRM_DEBUG("failed to write sink power state\n"); } else { /* * When turning on, we need to retry for 1ms to give the sink * time to wake up. */ for (i = 0; i < 3; i++) { ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, DP_SET_POWER_D0); if (ret == 1) break; drm_msleep(1, "915dps"); } } } static void intel_dp_prepare(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); ironlake_edp_backlight_off(intel_dp); ironlake_edp_panel_off(intel_dp); /* Wake up the sink first */ ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_link_down(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, false); /* Make sure the panel is off before trying to * change the mode */ } static void intel_dp_commit(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); intel_dp->dpms_mode = DRM_MODE_DPMS_ON; if (HAS_PCH_CPT(dev)) intel_cpt_verify_modeset(dev, intel_crtc->pipe); } static void intel_dp_dpms(struct drm_encoder *encoder, int mode) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { ironlake_edp_backlight_off(intel_dp); ironlake_edp_panel_off(intel_dp); ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, mode); intel_dp_link_down(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, false); if (is_cpu_edp(intel_dp)) ironlake_edp_pll_off(encoder); } else { if (is_cpu_edp(intel_dp)) ironlake_edp_pll_on(encoder); 
ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, mode); if (!(dp_reg & DP_PORT_EN)) { intel_dp_start_link_train(intel_dp); ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); } else ironlake_edp_panel_vdd_off(intel_dp, false); ironlake_edp_backlight_on(intel_dp); } intel_dp->dpms_mode = mode; } /* * Native read with retry for link status and receiver capability reads for * cases where the sink may still be asleep. */ static bool intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, uint8_t *recv, int recv_bytes) { int ret, i; /* * Sinks are *supposed* to come up within 1ms from an off state, * but we're also supposed to retry 3 times per the spec. */ for (i = 0; i < 3; i++) { ret = intel_dp_aux_native_read(intel_dp, address, recv, recv_bytes); if (ret == recv_bytes) return true; drm_msleep(1, "915dpl"); } return false; } /* * Fetch AUX CH registers 0x202 - 0x207 which contain * link status information */ static bool intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { return intel_dp_aux_native_read_retry(intel_dp, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE); } static uint8_t intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int r) { return link_status[r - DP_LANE0_1_STATUS]; } static uint8_t intel_get_adjust_request_voltage(uint8_t adjust_request[2], int lane) { int s = ((lane & 1) ? DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); uint8_t l = adjust_request[lane>>1]; return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; } static uint8_t intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], int lane) { int s = ((lane & 1) ? DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); uint8_t l = adjust_request[lane>>1]; return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; } #if 0 static char *voltage_names[] = { "0.4V", "0.6V", "0.8V", "1.2V" }; static char *pre_emph_names[] = { "0dB", "3.5dB", "6dB", "9.5dB" }; static char *link_train_names[] = { "pattern 1", "pattern 2", "idle", "off" }; #endif /* * These are source-specific values; current Intel hardware supports * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB */ static uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) return DP_TRAIN_VOLTAGE_SWING_800; else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) return DP_TRAIN_VOLTAGE_SWING_1200; else return DP_TRAIN_VOLTAGE_SWING_800; } static uint8_t intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) { struct drm_device *dev = intel_dp->base.base.dev; if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: return DP_TRAIN_PRE_EMPHASIS_6; case DP_TRAIN_VOLTAGE_SWING_600: case DP_TRAIN_VOLTAGE_SWING_800: return DP_TRAIN_PRE_EMPHASIS_3_5; default: return DP_TRAIN_PRE_EMPHASIS_0; } } else { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: return DP_TRAIN_PRE_EMPHASIS_6; case DP_TRAIN_VOLTAGE_SWING_600: return DP_TRAIN_PRE_EMPHASIS_6; case DP_TRAIN_VOLTAGE_SWING_800: return DP_TRAIN_PRE_EMPHASIS_3_5; case DP_TRAIN_VOLTAGE_SWING_1200: default: return DP_TRAIN_PRE_EMPHASIS_0; } } } static void intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { uint8_t v = 0; uint8_t p = 0; int lane; 
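(Aside: intel_get_adjust_request_voltage() and intel_get_adjust_request_pre_emphasis() above unpack the sink's DPCD ADJUST_REQUEST bytes, which carry two lanes per byte. The sketch below shows that decoding with the bit shifts written out literally rather than via the driver's DP_* macros; the example byte value is illustrative and not part of this change.)

/*
 * Standalone sketch of DPCD ADJUST_REQUEST decoding: bits 1:0/3:2 hold
 * lane 0 voltage swing / pre-emphasis, bits 5:4/7:6 hold lane 1.
 */
#include <stdio.h>

static unsigned adjust_voltage(const unsigned char *adj, int lane)
{
	int shift = (lane & 1) ? 4 : 0;		/* lane 1 : lane 0 */
	return (adj[lane >> 1] >> shift) & 3;	/* 2-bit swing level */
}

static unsigned adjust_pre_emphasis(const unsigned char *adj, int lane)
{
	int shift = (lane & 1) ? 6 : 2;
	return (adj[lane >> 1] >> shift) & 3;	/* 2-bit pre-emphasis level */
}

int main(void)
{
	/* Example request: lane0 = swing 2 / pe 1, lane1 = swing 1 / pe 0 */
	unsigned char adj[2] = { 0x16, 0x00 };
	int lane;

	for (lane = 0; lane < 2; lane++)
		printf("lane %d: swing level %u, pre-emphasis level %u\n",
		    lane, adjust_voltage(adj, lane),
		    adjust_pre_emphasis(adj, lane));
	return 0;
}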
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); uint8_t voltage_max; uint8_t preemph_max; for (lane = 0; lane < intel_dp->lane_count; lane++) { uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); if (this_v > v) v = this_v; if (this_p > p) p = this_p; } voltage_max = intel_dp_voltage_max(intel_dp); if (v >= voltage_max) v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); if (p >= preemph_max) p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; for (lane = 0; lane < 4; lane++) intel_dp->train_set[lane] = v | p; } static uint32_t intel_dp_signal_levels(uint8_t train_set) { uint32_t signal_levels = 0; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: default: signal_levels |= DP_VOLTAGE_0_4; break; case DP_TRAIN_VOLTAGE_SWING_600: signal_levels |= DP_VOLTAGE_0_6; break; case DP_TRAIN_VOLTAGE_SWING_800: signal_levels |= DP_VOLTAGE_0_8; break; case DP_TRAIN_VOLTAGE_SWING_1200: signal_levels |= DP_VOLTAGE_1_2; break; } switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPHASIS_0: default: signal_levels |= DP_PRE_EMPHASIS_0; break; case DP_TRAIN_PRE_EMPHASIS_3_5: signal_levels |= DP_PRE_EMPHASIS_3_5; break; case DP_TRAIN_PRE_EMPHASIS_6: signal_levels |= DP_PRE_EMPHASIS_6; break; case DP_TRAIN_PRE_EMPHASIS_9_5: signal_levels |= DP_PRE_EMPHASIS_9_5; break; } return signal_levels; } /* Gen6's DP voltage swing and pre-emphasis control */ static uint32_t intel_gen6_edp_signal_levels(uint8_t train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); switch (signal_levels) { case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; default: DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" "0x%x\n", signal_levels); return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; } } /* Gen7's DP voltage swing and pre-emphasis control */ static uint32_t intel_gen7_edp_signal_levels(uint8_t train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); switch (signal_levels) { case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: return EDP_LINK_TRAIN_400MV_0DB_IVB; case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: return EDP_LINK_TRAIN_400MV_3_5DB_IVB; case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: return EDP_LINK_TRAIN_400MV_6DB_IVB; case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: return EDP_LINK_TRAIN_600MV_0DB_IVB; case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: return EDP_LINK_TRAIN_600MV_3_5DB_IVB; case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: return EDP_LINK_TRAIN_800MV_0DB_IVB; case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 
return EDP_LINK_TRAIN_800MV_3_5DB_IVB; default: DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" "0x%x\n", signal_levels); return EDP_LINK_TRAIN_500MV_0DB_IVB; } } static uint8_t intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane) { int s = (lane & 1) * 4; uint8_t l = link_status[lane>>1]; return (l >> s) & 0xf; } /* Check for clock recovery is done on all channels */ static bool intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) { int lane; uint8_t lane_status; for (lane = 0; lane < lane_count; lane++) { lane_status = intel_get_lane_status(link_status, lane); if ((lane_status & DP_LANE_CR_DONE) == 0) return false; } return true; } /* Check to see if channel eq is done on all channels */ #define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ DP_LANE_CHANNEL_EQ_DONE|\ DP_LANE_SYMBOL_LOCKED) static bool intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { uint8_t lane_align; uint8_t lane_status; int lane; lane_align = intel_dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) return false; for (lane = 0; lane < intel_dp->lane_count; lane++) { lane_status = intel_get_lane_status(link_status, lane); if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) return false; } return true; } static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, intel_dp->train_set, intel_dp->lane_count); if (ret != intel_dp->lane_count) return false; return true; } /* Enable corresponding port and start training pattern 1 */ static void intel_dp_start_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); int i; uint8_t voltage; bool clock_recovery = false; int voltage_tries, loop_tries; u32 reg; uint32_t DP = intel_dp->DP; /* Enable output, wait for it to become active */ I915_WRITE(intel_dp->output_reg, intel_dp->DP); POSTING_READ(intel_dp->output_reg); intel_wait_for_vblank(dev, intel_crtc->pipe); /* Write the link configuration data */ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, intel_dp->link_configuration, DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) DP &= ~DP_LINK_TRAIN_MASK_CPT; else DP &= ~DP_LINK_TRAIN_MASK; memset(intel_dp->train_set, 0, 4); voltage = 0xff; voltage_tries = 0; loop_tries = 0; clock_recovery = false; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint8_t link_status[DP_LINK_STATUS_SIZE]; uint32_t signal_levels; if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); DRM_DEBUG_KMS("training pattern 1 signal levels 
%08x\n", signal_levels); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) reg = DP | DP_LINK_TRAIN_PAT_1_CPT; else reg = DP | DP_LINK_TRAIN_PAT_1; if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_1)) break; /* Set training pattern 1 */ DELAY(100); if (!intel_dp_get_link_status(intel_dp, link_status)) { DRM_ERROR("failed to get link status\n"); break; } if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { DRM_DEBUG_KMS("clock recovery OK\n"); clock_recovery = true; break; } /* Check to see if we've tried the max voltage */ for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; if (i == intel_dp->lane_count) { ++loop_tries; if (loop_tries == 5) { DRM_DEBUG_KMS("too many full retries, give up\n"); break; } memset(intel_dp->train_set, 0, 4); voltage_tries = 0; continue; } /* Check to see if we've tried the same voltage 5 times */ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { ++voltage_tries; if (voltage_tries == 5) { DRM_DEBUG_KMS("too many voltage retries, give up\n"); break; } } else voltage_tries = 0; voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Compute new intel_dp->train_set as requested by target */ intel_get_adjust_train(intel_dp, link_status); } intel_dp->DP = DP; } static void intel_dp_complete_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; bool channel_eq = false; int tries, cr_tries; u32 reg; uint32_t DP = intel_dp->DP; /* channel equalization */ tries = 0; cr_tries = 0; channel_eq = false; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; uint8_t link_status[DP_LINK_STATUS_SIZE]; if (cr_tries > 5) { DRM_ERROR("failed to train DP, aborting\n"); intel_dp_link_down(intel_dp); break; } if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) reg = DP | DP_LINK_TRAIN_PAT_2_CPT; else reg = DP | DP_LINK_TRAIN_PAT_2; /* channel eq pattern */ if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_2)) break; DELAY(400); if (!intel_dp_get_link_status(intel_dp, link_status)) break; /* Make sure clock is still ok */ if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { intel_dp_start_link_train(intel_dp); cr_tries++; continue; } if (intel_channel_eq_ok(intel_dp, link_status)) { channel_eq = true; break; } /* Try 5 times, then try clock recovery if that fails */ if (tries > 5) { intel_dp_link_down(intel_dp); intel_dp_start_link_train(intel_dp); tries = 0; cr_tries++; continue; } /* Compute new intel_dp->train_set as requested by target */ intel_get_adjust_train(intel_dp, link_status); ++tries; } if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) reg = DP | DP_LINK_TRAIN_OFF_CPT; else reg = DP | DP_LINK_TRAIN_OFF; I915_WRITE(intel_dp->output_reg, reg); 
POSTING_READ(intel_dp->output_reg); intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); } static void intel_dp_link_down(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) return; DRM_DEBUG_KMS("\n"); if (is_edp(intel_dp)) { DP &= ~DP_PLL_ENABLE; I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); DELAY(100); } if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { DP &= ~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); } else { DP &= ~DP_LINK_TRAIN_MASK; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); } POSTING_READ(intel_dp->output_reg); drm_msleep(17, "915dlo"); if (is_edp(intel_dp)) { if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) DP |= DP_LINK_TRAIN_OFF_CPT; else DP |= DP_LINK_TRAIN_OFF; } if (!HAS_PCH_CPT(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { struct drm_crtc *crtc = intel_dp->base.base.crtc; /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. * * Combine this with another hardware workaround: * transcoder select bit can only be cleared while the * port is enabled. */ DP &= ~DP_PIPEB_SELECT; I915_WRITE(intel_dp->output_reg, DP); /* Changes to enable or select take place the vblank * after being written. */ if (crtc == NULL) { /* We can arrive here never having been attached * to a CRTC, for instance, due to inheriting * random state from the BIOS. * * If the pipe is not running, play safe and * wait for the clocks to stabilise before * continuing. */ POSTING_READ(intel_dp->output_reg); drm_msleep(50, "915dla"); } else intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); } DP &= ~DP_AUDIO_OUTPUT_ENABLE; I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); POSTING_READ(intel_dp->output_reg); drm_msleep(intel_dp->panel_power_down_delay, "915ldo"); } static bool intel_dp_get_dpcd(struct intel_dp *intel_dp) { if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) && (intel_dp->dpcd[DP_DPCD_REV] != 0)) { return true; } return false; } +static void +intel_dp_probe_oui(struct intel_dp *intel_dp) +{ + u8 buf[3]; + + if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) + return; + + if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) + DRM_DEBUG_KMS("Sink OUI: %02x%02x%02x\n", + buf[0], buf[1], buf[2]); + + if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) + DRM_DEBUG_KMS("Branch OUI: %02x%02x%02x\n", + buf[0], buf[1], buf[2]); +} + static bool intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) { int ret; ret = intel_dp_aux_native_read_retry(intel_dp, DP_DEVICE_SERVICE_IRQ_VECTOR, sink_irq_vector, 1); if (!ret) return false; return true; } static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { /* NAK by default */ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); } /* * According to DP spec * 5.1.2: * 1. Read DPCD * 2. Configure link according to Receiver Capabilities * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 4. 
Check link status on receipt of hot-plug interrupt */ static void intel_dp_check_link_status(struct intel_dp *intel_dp) { u8 sink_irq_vector; u8 link_status[DP_LINK_STATUS_SIZE]; if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) return; if (!intel_dp->base.base.crtc) return; /* Try to read receiver status if the link appears to be up */ if (!intel_dp_get_link_status(intel_dp, link_status)) { intel_dp_link_down(intel_dp); return; } /* Now read the DPCD to see if it's actually running */ if (!intel_dp_get_dpcd(intel_dp)) { intel_dp_link_down(intel_dp); return; } /* Try to read the source of the interrupt */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { /* Clear interrupt source */ intel_dp_aux_native_write_1(intel_dp, DP_DEVICE_SERVICE_IRQ_VECTOR, sink_irq_vector); if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) intel_dp_handle_test_request(intel_dp); if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) DRM_DEBUG_KMS("CP or sink specific irq unhandled\n"); } if (!intel_channel_eq_ok(intel_dp, link_status)) { DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", drm_get_encoder_name(&intel_dp->base.base)); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); } } static enum drm_connector_status intel_dp_detect_dpcd(struct intel_dp *intel_dp) { if (intel_dp_get_dpcd(intel_dp)) return connector_status_connected; return connector_status_disconnected; } static enum drm_connector_status ironlake_dp_detect(struct intel_dp *intel_dp) { enum drm_connector_status status; /* Can't disconnect eDP, but you can close the lid... */ if (is_edp(intel_dp)) { status = intel_panel_detect(intel_dp->base.base.dev); if (status == connector_status_unknown) status = connector_status_connected; return status; } return intel_dp_detect_dpcd(intel_dp); } static enum drm_connector_status g4x_dp_detect(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t temp, bit; switch (intel_dp->output_reg) { case DP_B: bit = DPB_HOTPLUG_INT_STATUS; break; case DP_C: bit = DPC_HOTPLUG_INT_STATUS; break; case DP_D: bit = DPD_HOTPLUG_INT_STATUS; break; default: return connector_status_unknown; } temp = I915_READ(PORT_HOTPLUG_STAT); if ((temp & bit) == 0) return connector_status_disconnected; return intel_dp_detect_dpcd(intel_dp); } static struct edid * intel_dp_get_edid(struct drm_connector *connector, device_t adapter) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct edid *edid; ironlake_edp_panel_vdd_on(intel_dp); edid = drm_get_edid(connector, adapter); ironlake_edp_panel_vdd_off(intel_dp, false); return edid; } static int intel_dp_get_edid_modes(struct drm_connector *connector, device_t adapter) { struct intel_dp *intel_dp = intel_attached_dp(connector); int ret; ironlake_edp_panel_vdd_on(intel_dp); ret = intel_ddc_get_modes(connector, adapter); ironlake_edp_panel_vdd_off(intel_dp, false); return ret; } /** * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. * * \return true if DP port is connected. * \return false if DP port is disconnected. 
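 * * \note When the port reports connected, the sink and branch OUIs are also read (for debugging) and the EDID is checked for an audio-capable monitor unless audio is forced on or off.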
*/ static enum drm_connector_status intel_dp_detect(struct drm_connector *connector, bool force) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = intel_dp->base.base.dev; enum drm_connector_status status; struct edid *edid = NULL; intel_dp->has_audio = false; if (HAS_PCH_SPLIT(dev)) status = ironlake_dp_detect(intel_dp); else status = g4x_dp_detect(intel_dp); if (status != connector_status_connected) return status; + intel_dp_probe_oui(intel_dp); + if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); } else { edid = intel_dp_get_edid(connector, intel_dp->adapter); if (edid) { intel_dp->has_audio = drm_detect_monitor_audio(edid); connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } } return connector_status_connected; } static int intel_dp_get_modes(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; /* We should parse the EDID data and find out if it has an audio sink */ ret = intel_dp_get_edid_modes(connector, intel_dp->adapter); if (ret) { if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) { struct drm_display_mode *newmode; list_for_each_entry(newmode, &connector->probed_modes, head) { if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) { intel_dp->panel_fixed_mode = drm_mode_duplicate(dev, newmode); break; } } } return ret; } /* if eDP has no EDID, try to use fixed panel mode from VBT */ if (is_edp(intel_dp)) { /* initialize panel mode from VBT if available for eDP */ if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { intel_dp->panel_fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); if (intel_dp->panel_fixed_mode) { intel_dp->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; } } if (intel_dp->panel_fixed_mode) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); drm_mode_probed_add(connector, mode); return 1; } } return 0; } static bool intel_dp_detect_audio(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct edid *edid; bool has_audio = false; edid = intel_dp_get_edid(connector, intel_dp->adapter); if (edid) { has_audio = drm_detect_monitor_audio(edid); connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } return has_audio; } static int intel_dp_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct drm_i915_private *dev_priv = connector->dev->dev_private; struct intel_dp *intel_dp = intel_attached_dp(connector); int ret; ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; if (property == dev_priv->force_audio_property) { int i = val; bool has_audio; if (i == intel_dp->force_audio) return 0; intel_dp->force_audio = i; if (i == HDMI_AUDIO_AUTO) has_audio = intel_dp_detect_audio(connector); else has_audio = (i == HDMI_AUDIO_ON); if (has_audio == intel_dp->has_audio) return 0; intel_dp->has_audio = has_audio; goto done; } if (property == dev_priv->broadcast_rgb_property) { if (val == !!intel_dp->color_range) return 0; intel_dp->color_range = val ? 
DP_COLOR_RANGE_16_235 : 0; goto done; } return -EINVAL; done: if (intel_dp->base.base.crtc) { struct drm_crtc *crtc = intel_dp->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } return 0; } static void intel_dp_destroy(struct drm_connector *connector) { struct drm_device *dev = connector->dev; if (intel_dpd_is_edp(dev)) intel_panel_destroy_backlight(dev); #if 0 drm_sysfs_connector_remove(connector); #endif drm_connector_cleanup(connector); free(connector, DRM_MEM_KMS); } static void intel_dp_encoder_destroy(struct drm_encoder *encoder) { struct drm_device *dev; struct intel_dp *intel_dp; intel_dp = enc_to_intel_dp(encoder); dev = encoder->dev; if (intel_dp->dp_iic_bus != NULL) { if (intel_dp->adapter != NULL) { device_delete_child(intel_dp->dp_iic_bus, intel_dp->adapter); } device_delete_child(dev->device, intel_dp->dp_iic_bus); } drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private; taskqueue_cancel_timeout(dev_priv->tq, &intel_dp->panel_vdd_task, NULL); taskqueue_drain_timeout(dev_priv->tq, &intel_dp->panel_vdd_task); ironlake_panel_vdd_off_sync(intel_dp); } free(intel_dp, DRM_MEM_KMS); } static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { .dpms = intel_dp_dpms, .mode_fixup = intel_dp_mode_fixup, .prepare = intel_dp_prepare, .mode_set = intel_dp_mode_set, .commit = intel_dp_commit, }; static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_set_property, .destroy = intel_dp_destroy, }; static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { .destroy = intel_dp_encoder_destroy, }; static void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); intel_dp_check_link_status(intel_dp); } /* Return which DP Port should be selected for Transcoder DP control */ int intel_trans_dp_port_sel(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_encoder *encoder; list_for_each_entry(encoder, &mode_config->encoder_list, head) { struct intel_dp *intel_dp; if (encoder->crtc != crtc) continue; intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || intel_dp->base.type == INTEL_OUTPUT_EDP) return intel_dp->output_reg; } return -1; } /* check the VBT to see whether the eDP is on DP-D port */ bool intel_dpd_is_edp(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct child_device_config *p_child; int i; if (!dev_priv->child_dev_num) return false; for (i = 0; i < dev_priv->child_dev_num; i++) { p_child = dev_priv->child_dev + i; if (p_child->dvo_port == PORT_IDPD && p_child->device_type == DEVICE_TYPE_eDP) return true; } return false; } static void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) { intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); } void intel_dp_init(struct drm_device *dev, int output_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; struct intel_dp *intel_dp; struct 
intel_encoder *intel_encoder; struct intel_connector *intel_connector; const char *name = NULL; int type; intel_dp = malloc(sizeof(struct intel_dp), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_dp->output_reg = output_reg; intel_dp->dpms_mode = -1; intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_encoder = &intel_dp->base; if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) if (intel_dpd_is_edp(dev)) intel_dp->is_pch_edp = true; if (output_reg == DP_A || is_pch_edp(intel_dp)) { type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; } else { type = DRM_MODE_CONNECTOR_DisplayPort; intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; } connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); connector->polled = DRM_CONNECTOR_POLL_HPD; if (output_reg == DP_B || output_reg == PCH_DP_B) intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); else if (output_reg == DP_C || output_reg == PCH_DP_C) intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); else if (output_reg == DP_D || output_reg == PCH_DP_D) intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); if (is_edp(intel_dp)) { intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); TIMEOUT_TASK_INIT(dev_priv->tq, &intel_dp->panel_vdd_task, 0, ironlake_panel_vdd_work, intel_dp); } intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + connector->interlace_allowed = true; connector->doublescan_allowed = 0; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); intel_connector_attach_encoder(intel_connector, intel_encoder); #if 0 drm_sysfs_connector_add(connector); #endif /* Set up the DDC bus. 
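The switch below picks the DPDDC-<A..D> bus name for this port; external DP ports (B/C/D) are also added to hotplug_supported_mask.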
*/ switch (output_reg) { case DP_A: name = "DPDDC-A"; break; case DP_B: case PCH_DP_B: dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; name = "DPDDC-B"; break; case DP_C: case PCH_DP_C: dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; name = "DPDDC-C"; break; case DP_D: case PCH_DP_D: dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; name = "DPDDC-D"; break; } /* Cache some DPCD data in the eDP case */ if (is_edp(intel_dp)) { bool ret; struct edp_power_seq cur, vbt; u32 pp_on, pp_off, pp_div; pp_on = I915_READ(PCH_PP_ON_DELAYS); pp_off = I915_READ(PCH_PP_OFF_DELAYS); pp_div = I915_READ(PCH_PP_DIVISOR); + + if (!pp_on || !pp_off || !pp_div) { + DRM_INFO("bad panel power sequencing delays, disabling panel\n"); + intel_dp_encoder_destroy(&intel_dp->base.base); + intel_dp_destroy(&intel_connector->base); + return; + } /* Pull timing values out of registers */ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> PANEL_POWER_UP_DELAY_SHIFT; cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> PANEL_LIGHT_ON_DELAY_SHIFT; cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> PANEL_LIGHT_OFF_DELAY_SHIFT; cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> PANEL_POWER_DOWN_DELAY_SHIFT; cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); vbt = dev_priv->edp.pps; DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); #define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) intel_dp->panel_power_up_delay = get_delay(t1_t3); intel_dp->backlight_on_delay = get_delay(t8); intel_dp->backlight_off_delay = get_delay(t9); intel_dp->panel_power_down_delay = get_delay(t10); intel_dp->panel_power_cycle_delay = get_delay(t11_t12); DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, intel_dp->panel_power_cycle_delay); DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); ironlake_edp_panel_vdd_on(intel_dp); ret = intel_dp_get_dpcd(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, false); if (ret) { if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING; } else { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); intel_dp_encoder_destroy(&intel_dp->base.base); intel_dp_destroy(&intel_connector->base); return; } } intel_dp_i2c_init(intel_dp, intel_connector, name); intel_encoder->hot_plug = intel_dp_hot_plug; if (is_edp(intel_dp)) { dev_priv->int_edp_connector = connector; intel_panel_setup_backlight(dev); } intel_dp_add_properties(intel_dp, connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. 
*/ if (IS_G4X(dev) && !IS_GM45(dev)) { u32 temp = I915_READ(PEG_BAND_GAP_DATA); I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } } Index: head/sys/dev/drm2/i915/intel_drv.h =================================================================== --- head/sys/dev/drm2/i915/intel_drv.h (revision 277486) +++ head/sys/dev/drm2/i915/intel_drv.h (revision 277487) @@ -1,428 +1,508 @@ /* * Copyright (c) 2006 Dave Airlie * Copyright (c) 2007-2008 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * $FreeBSD$ */ #ifndef DRM_INTEL_DRV_H #define DRM_INTEL_DRV_H #include #include #include #include #include #define _intel_wait_for(DEV, COND, MS, W, WMSG) \ ({ \ int end, ret; \ \ end = ticks + (MS) * hz / 1000; \ ret = 0; \ \ while (!(COND)) { \ if (time_after(ticks, end)) { \ ret = -ETIMEDOUT; \ break; \ } \ if (W) \ pause((WMSG), 1); \ else \ DELAY(1000); \ } \ \ ret; \ }) +#define wait_for_atomic_us(COND, US) ({ \ + int i, ret__ = -ETIMEDOUT; \ + for (i = 0; i < (US); i++) { \ + if ((COND)) { \ + ret__ = 0; \ + break; \ + } \ + DELAY(1); \ + } \ + ret__; \ +}) + +#define wait_for(COND, MS) _intel_wait_for(NULL, COND, MS, 1, "915wfi") +#define wait_for_atomic(COND, MS) _intel_wait_for(NULL, COND, MS, 0, "915wfa") + #define KHz(x) (1000*x) #define MHz(x) KHz(1000*x) /* store information about an Ixxx DVO */ /* The i830->i865 use multiple DVOs with multiple i2cs */ /* the i915, i945 have a single sDVO i2c bus - which is different */ #define MAX_OUTPUTS 6 /* maximum connectors per crtcs in the mode set */ #define INTELFB_CONN_LIMIT 4 #define INTEL_I2C_BUS_DVO 1 #define INTEL_I2C_BUS_SDVO 2 /* these are outputs from the chip - integrated only external chips are via DVO or SDVO output */ #define INTEL_OUTPUT_UNUSED 0 #define INTEL_OUTPUT_ANALOG 1 #define INTEL_OUTPUT_DVO 2 #define INTEL_OUTPUT_SDVO 3 #define INTEL_OUTPUT_LVDS 4 #define INTEL_OUTPUT_TVOUT 5 #define INTEL_OUTPUT_HDMI 6 #define INTEL_OUTPUT_DISPLAYPORT 7 #define INTEL_OUTPUT_EDP 8 /* Intel Pipe Clone Bit */ #define INTEL_HDMIB_CLONE_BIT 1 #define INTEL_HDMIC_CLONE_BIT 2 #define INTEL_HDMID_CLONE_BIT 3 #define INTEL_HDMIE_CLONE_BIT 4 #define INTEL_HDMIF_CLONE_BIT 5 #define INTEL_SDVO_NON_TV_CLONE_BIT 6 #define INTEL_SDVO_TV_CLONE_BIT 7 #define INTEL_SDVO_LVDS_CLONE_BIT 8 #define INTEL_ANALOG_CLONE_BIT 9 #define INTEL_TV_CLONE_BIT 10 #define INTEL_DP_B_CLONE_BIT 11 #define INTEL_DP_C_CLONE_BIT 12 #define INTEL_DP_D_CLONE_BIT 13 #define INTEL_LVDS_CLONE_BIT 14 #define INTEL_DVO_TMDS_CLONE_BIT 
15 #define INTEL_DVO_LVDS_CLONE_BIT 16 #define INTEL_EDP_CLONE_BIT 17 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 #define INTEL_DVO_CHIP_TMDS 2 #define INTEL_DVO_CHIP_TVOUT 4 /* drm_display_mode->private_flags */ #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) #define INTEL_MODE_DP_FORCE_6BPC (0x10) /* This flag must be set by the encoder's mode_fixup if it changes the crtc * timings in the mode to prevent the crtc fixup from overwriting them. * Currently only lvds needs that. */ #define INTEL_MODE_CRTC_TIMINGS_SET (0x20) static inline void intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, int multiplier) { mode->clock *= multiplier; mode->private_flags |= multiplier; } static inline int intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) { return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT; } struct intel_framebuffer { struct drm_framebuffer base; struct drm_i915_gem_object *obj; }; struct intel_fbdev { struct drm_fb_helper helper; struct intel_framebuffer ifb; struct list_head fbdev_list; struct drm_display_mode *our_mode; }; struct intel_encoder { struct drm_encoder base; int type; bool needs_tv_clock; void (*hot_plug)(struct intel_encoder *); int crtc_mask; int clone_mask; }; struct intel_connector { struct drm_connector base; struct intel_encoder *encoder; }; struct intel_crtc { struct drm_crtc base; enum pipe pipe; enum plane plane; u8 lut_r[256], lut_g[256], lut_b[256]; int dpms_mode; bool active; /* is the crtc on? independent of the dpms mode */ bool busy; /* is scanout buffer being updated frequently? */ struct callout idle_callout; bool lowfreq_avail; struct intel_overlay *overlay; struct intel_unpin_work *unpin_work; int fdi_lanes; struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; bool cursor_visible; unsigned int bpp; - bool no_pll; /* tertiary pipe for IVB */ - bool use_pll_a; + /* We can share PLLs across outputs if the timings match */ + struct intel_pch_pll *pch_pll; }; struct intel_plane { struct drm_plane base; enum pipe pipe; struct drm_i915_gem_object *obj; bool primary_disabled; int max_downscale; u32 lut_r[1024], lut_g[1024], lut_b[1024]; void (*update_plane)(struct drm_plane *plane, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h); void (*disable_plane)(struct drm_plane *plane); int (*update_colorkey)(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key); void (*get_colorkey)(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key); }; +struct intel_watermark_params { + unsigned long fifo_size; + unsigned long max_wm; + unsigned long default_wm; + unsigned long guard_size; + unsigned long cacheline_size; +}; + +struct cxsr_latency { + int is_desktop; + int is_ddr3; + unsigned long fsb_freq; + unsigned long mem_freq; + unsigned long display_sr; + unsigned long display_hpll_disable; + unsigned long cursor_sr; + unsigned long cursor_hpll_disable; +}; + #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) #define to_intel_connector(x) container_of(x, struct intel_connector, base) #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_plane(x) 
container_of(x, struct intel_plane, base) #define DIP_HEADER_SIZE 5 #define DIP_TYPE_AVI 0x82 #define DIP_VERSION_AVI 0x2 #define DIP_LEN_AVI 13 +#define DIP_AVI_PR_1 0 +#define DIP_AVI_PR_2 1 #define DIP_TYPE_SPD 0x83 #define DIP_VERSION_SPD 0x1 #define DIP_LEN_SPD 25 #define DIP_SPD_UNKNOWN 0 #define DIP_SPD_DSTB 0x1 #define DIP_SPD_DVDP 0x2 #define DIP_SPD_DVHS 0x3 #define DIP_SPD_HDDVR 0x4 #define DIP_SPD_DVC 0x5 #define DIP_SPD_DSC 0x6 #define DIP_SPD_VCD 0x7 #define DIP_SPD_GAME 0x8 #define DIP_SPD_PC 0x9 #define DIP_SPD_BD 0xa #define DIP_SPD_SCD 0xb struct dip_infoframe { uint8_t type; /* HB0 */ uint8_t ver; /* HB1 */ uint8_t len; /* HB2 - body len, not including checksum */ uint8_t ecc; /* Header ECC */ uint8_t checksum; /* PB0 */ union { struct { /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ uint8_t Y_A_B_S; /* PB2 - C 7:6, M 5:4, R 3:0 */ uint8_t C_M_R; /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ uint8_t ITC_EC_Q_SC; /* PB4 - VIC 6:0 */ uint8_t VIC; - /* PB5 - PR 3:0 */ - uint8_t PR; + /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */ + uint8_t YQ_CN_PR; /* PB6 to PB13 */ uint16_t top_bar_end; uint16_t bottom_bar_start; uint16_t left_bar_end; uint16_t right_bar_start; - } avi; + } __attribute__ ((packed)) avi; struct { uint8_t vn[8]; uint8_t pd[16]; uint8_t sdi; - } spd; + } __attribute__ ((packed)) spd; uint8_t payload[27]; } __attribute__ ((packed)) body; } __attribute__((packed)); +struct intel_hdmi { + struct intel_encoder base; + u32 sdvox_reg; + int ddc_bus; + int ddi_port; + uint32_t color_range; + bool has_hdmi_sink; + bool has_audio; + enum hdmi_force_audio force_audio; + void (*write_infoframe)(struct drm_encoder *encoder, + struct dip_infoframe *frame); +}; + static inline struct drm_crtc * intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; return dev_priv->pipe_to_crtc_mapping[pipe]; } static inline struct drm_crtc * intel_get_crtc_for_plane(struct drm_device *dev, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; return dev_priv->plane_to_crtc_mapping[plane]; } struct intel_unpin_work { struct task task; struct drm_device *dev; struct drm_i915_gem_object *old_fb_obj; struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; int pending; bool enable_stall_check; }; struct intel_fbc_work { struct timeout_task task; struct drm_crtc *crtc; struct drm_framebuffer *fb; int interval; }; int intel_ddc_get_modes(struct drm_connector *c, device_t adapter); extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); extern void intel_attach_force_audio_property(struct drm_connector *connector); extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); -void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); -extern bool intel_sdvo_init(struct drm_device *dev, int output_device); +extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); +extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode); +extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder); +extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); +extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, + bool is_sdvob); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); extern void intel_mark_busy(struct 
drm_device *dev, struct drm_i915_gem_object *obj); extern bool intel_lvds_init(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config(struct intel_encoder *, int *, int *); extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); +extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, + enum plane plane); +void intel_sanitize_pm(struct drm_device *dev); + /* intel_panel.c */ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); extern void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); extern u32 intel_panel_get_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); extern int intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev); extern void intel_panel_disable_backlight(struct drm_device *dev); extern void intel_panel_destroy_backlight(struct drm_device *dev); extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare(struct drm_encoder *encoder); extern void intel_encoder_commit(struct drm_encoder *encoder); extern void intel_encoder_destroy(struct drm_encoder *encoder); static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) { return to_intel_connector(connector)->encoder; } extern void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder); extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); struct intel_load_detect_pipe { struct drm_framebuffer *release_fb; bool load_detect_temp; int dpms_mode; }; extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct drm_display_mode *mode, struct intel_load_detect_pipe *old); extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct intel_load_detect_pipe *old); extern void intelfb_restore(void); extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); extern void intel_enable_clock_gating(struct drm_device *dev); +extern void ironlake_disable_rc6(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); extern void gen6_enable_rps(struct drm_i915_private *dev_priv); extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); extern void gen6_disable_rps(struct drm_device *dev); extern 
void intel_init_emon(struct drm_device *dev); +extern int intel_enable_rc6(const struct drm_device *dev); +extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); +extern void intel_ddi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, struct intel_ring_buffer *pipelined); extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj); extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev); extern void intel_prepare_page_flip(struct drm_device *dev, int plane); extern void intel_finish_page_flip(struct drm_device *dev, int pipe); extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); extern void intel_setup_overlay(struct drm_device *dev); extern void intel_cleanup_overlay(struct drm_device *dev); extern int intel_overlay_switch_off(struct intel_overlay *overlay); extern int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_overlay_attrs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_fb_output_poll_changed(struct drm_device *dev); extern void intel_fb_restore_mode(struct drm_device *dev); extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) extern void intel_init_clock_gating(struct drm_device *dev); extern void intel_write_eld(struct drm_encoder *encoder, struct drm_display_mode *mode); extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); +extern void intel_prepare_ddi(struct drm_device *dev); +extern void hsw_fdi_link_train(struct drm_crtc *crtc); +extern void intel_ddi_init(struct drm_device *dev, enum port port); /* For use by IVB LP watermark workaround in intel_sprite.c */ -extern void sandybridge_update_wm(struct drm_device *dev); +extern void intel_update_watermarks(struct drm_device *dev); extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size); +extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, + struct drm_display_mode *mode); + extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); -#endif +extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg); + +/* Power-related functions, located in intel_pm.c */ +extern void intel_init_pm(struct drm_device *dev); +/* FBC */ +extern bool intel_fbc_enabled(struct drm_device *dev); +extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); +extern void intel_update_fbc(struct drm_device *dev); + +#endif /* __INTEL_DRV_H__ */ Index: head/sys/dev/drm2/i915/intel_fb.c =================================================================== --- head/sys/dev/drm2/i915/intel_fb.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_fb.c (revision 277487) @@ -1,287 +1,287 @@ /* * Copyright © 2007 David Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated 
documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * David Airlie */ #include __FBSDID("$FreeBSD$"); #include "opt_syscons.h" #include #include #include #include #include #include #include static int intelfb_create(struct intel_fbdev *ifbdev, struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = ifbdev->helper.dev; #if 0 struct drm_i915_private *dev_priv = dev->dev_private; #endif struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; struct drm_i915_gem_object *obj; int size, ret; /* we don't do packed 24bpp */ if (sizes->surface_bpp == 24) sizes->surface_bpp = 32; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) / 8), 64); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = mode_cmd.pitches[0] * mode_cmd.height; size = roundup2(size, PAGE_SIZE); obj = i915_gem_alloc_object(dev, size); if (!obj) { DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } DRM_LOCK(dev); /* Flush everything out, we'll be doing GTT only from now on */ - ret = intel_pin_and_fence_fb_obj(dev, obj, false); + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; } #if 0 info = framebuffer_alloc(0, device); if (!info) { ret = -ENOMEM; goto out_unpin; } info->par = ifbdev; #else info = malloc(sizeof(struct fb_info), DRM_MEM_KMS, M_WAITOK | M_ZERO); info->fb_size = size; info->fb_bpp = sizes->surface_bpp; info->fb_width = sizes->fb_width; info->fb_height = sizes->fb_height; info->fb_pbase = dev->agp->base + obj->gtt_offset; info->fb_vbase = (vm_offset_t)pmap_mapdev_attr(info->fb_pbase, size, PAT_WRITE_COMBINING); #endif ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); if (ret) goto out_unpin; fb = &ifbdev->ifb.base; ifbdev->helper.fb = fb; ifbdev->helper.fbdev = info; #if 0 strcpy(info->fix.id, "inteldrmfb"); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) { ret = -ENOMEM; goto out_unpin; } /* setup aperture base/size for vesafb takeover */ info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; goto out_unpin; } info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; info->fix.smem_len = size; info->screen_base = ioremap_wc(dev->agp->base + 
obj->gtt_offset, size); if (!info->screen_base) { ret = -ENOSPC; goto out_unpin; } info->screen_size = size; // memset(info->screen_base, 0, size); drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ #endif DRM_DEBUG_KMS("allocated %dx%d (s %dbits) fb: 0x%08x, bo %p\n", fb->width, fb->height, fb->depth, obj->gtt_offset, obj); DRM_UNLOCK(dev); #if 1 KIB_NOTYET(); #else vga_switcheroo_client_fb_set(dev->pdev, info); #endif return 0; out_unpin: i915_gem_object_unpin(obj); out_unref: drm_gem_object_unreference(&obj->base); DRM_UNLOCK(dev); out: return ret; } static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; int new_fb = 0; int ret; if (!helper->fb) { ret = intelfb_create(ifbdev, sizes); if (ret) return ret; new_fb = 1; } return new_fb; } static struct drm_fb_helper_funcs intel_fb_helper_funcs = { .gamma_set = intel_crtc_fb_gamma_set, .gamma_get = intel_crtc_fb_gamma_get, .fb_probe = intel_fb_find_or_create_single, }; static void intel_fbdev_destroy(struct drm_device *dev, struct intel_fbdev *ifbdev) { #if 0 struct fb_info *info; #endif struct intel_framebuffer *ifb = &ifbdev->ifb; #if 0 if (ifbdev->helper.fbdev) { info = ifbdev->helper.fbdev; unregister_framebuffer(info); iounmap(info->screen_base); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } #endif drm_fb_helper_fini(&ifbdev->helper); drm_framebuffer_cleanup(&ifb->base); if (ifb->obj) { drm_gem_object_unreference_unlocked(&ifb->obj->base); ifb->obj = NULL; } } #ifdef DEV_SC extern int sc_txtmouse_no_retrace_wait; #endif int intel_fbdev_init(struct drm_device *dev) { struct intel_fbdev *ifbdev; drm_i915_private_t *dev_priv = dev->dev_private; int ret; ifbdev = malloc(sizeof(struct intel_fbdev), DRM_MEM_KMS, M_WAITOK | M_ZERO); dev_priv->fbdev = ifbdev; ifbdev->helper.funcs = &intel_fb_helper_funcs; ret = drm_fb_helper_init(dev, &ifbdev->helper, dev_priv->num_pipe, INTELFB_CONN_LIMIT); if (ret) { free(ifbdev, DRM_MEM_KMS); return ret; } drm_fb_helper_single_add_all_connectors(&ifbdev->helper); drm_fb_helper_initial_config(&ifbdev->helper, 32); #ifdef DEV_SC sc_txtmouse_no_retrace_wait = 1; #endif return 0; } void intel_fbdev_fini(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv->fbdev) return; intel_fbdev_destroy(dev, dev_priv->fbdev); free(dev_priv->fbdev, DRM_MEM_KMS); dev_priv->fbdev = NULL; } void intel_fb_output_poll_changed(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } void intel_fb_restore_mode(struct drm_device *dev) { int ret; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_mode_config *config = &dev->mode_config; struct drm_plane *plane; sx_xlock(&dev->mode_config.mutex); ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); if (ret) DRM_DEBUG("failed to restore crtc mode\n"); /* Be sure to shut off any planes that may be active */ list_for_each_entry(plane, &config->plane_list, head) plane->funcs->disable_plane(plane); sx_xunlock(&dev->mode_config.mutex); } Index: head/sys/dev/drm2/i915/intel_hdmi.c =================================================================== --- head/sys/dev/drm2/i915/intel_hdmi.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_hdmi.c (revision 
277487) @@ -1,576 +1,754 @@ /* * Copyright 2006 Dave Airlie * Copyright © 2006-2009 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt * Jesse Barnes */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include -struct intel_hdmi { - struct intel_encoder base; - u32 sdvox_reg; - int ddc_bus; - uint32_t color_range; - bool has_hdmi_sink; - bool has_audio; - enum hdmi_force_audio force_audio; - void (*write_infoframe)(struct drm_encoder *encoder, - struct dip_infoframe *frame); -}; - -static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) +struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) { return container_of(encoder, struct intel_hdmi, base.base); } static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_hdmi, base); } void intel_dip_infoframe_csum(struct dip_infoframe *frame) { uint8_t *data = (uint8_t *)frame; uint8_t sum = 0; unsigned i; frame->checksum = 0; frame->ecc = 0; for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++) sum += data[i]; frame->checksum = 0x100 - sum; } -static u32 intel_infoframe_index(struct dip_infoframe *frame) +static u32 g4x_infoframe_index(struct dip_infoframe *frame) { - u32 flags = 0; - switch (frame->type) { case DIP_TYPE_AVI: - flags |= VIDEO_DIP_SELECT_AVI; - break; + return VIDEO_DIP_SELECT_AVI; case DIP_TYPE_SPD: - flags |= VIDEO_DIP_SELECT_SPD; - break; + return VIDEO_DIP_SELECT_SPD; default: - DRM_DEBUG("unknown info frame type %d\n", frame->type); - break; + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + return 0; } - - return flags; } -static u32 intel_infoframe_flags(struct dip_infoframe *frame) +static u32 g4x_infoframe_enable(struct dip_infoframe *frame) { - u32 flags = 0; + switch (frame->type) { + case DIP_TYPE_AVI: + return VIDEO_DIP_ENABLE_AVI; + case DIP_TYPE_SPD: + return VIDEO_DIP_ENABLE_SPD; + default: + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + return 0; + } +} +static u32 hsw_infoframe_enable(struct dip_infoframe *frame) +{ switch (frame->type) { case DIP_TYPE_AVI: - flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; - break; + return VIDEO_DIP_ENABLE_AVI_HSW; case DIP_TYPE_SPD: - flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC; - break; + return VIDEO_DIP_ENABLE_SPD_HSW; default: - DRM_DEBUG("unknown info frame type %d\n", frame->type); - break; + 
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + return 0; } +} - return flags; +static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe) +{ + switch (frame->type) { + case DIP_TYPE_AVI: + return HSW_TVIDEO_DIP_AVI_DATA(pipe); + case DIP_TYPE_SPD: + return HSW_TVIDEO_DIP_SPD_DATA(pipe); + default: + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + return 0; + } } -static void i9xx_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) +static void g4x_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - u32 port, flags, val = I915_READ(VIDEO_DIP_CTL); + u32 val = I915_READ(VIDEO_DIP_CTL); unsigned i, len = DIP_HEADER_SIZE + frame->len; - - /* XXX first guess at handling video port, is this corrent? */ + val &= ~VIDEO_DIP_PORT_MASK; if (intel_hdmi->sdvox_reg == SDVOB) - port = VIDEO_DIP_PORT_B; + val |= VIDEO_DIP_PORT_B; else if (intel_hdmi->sdvox_reg == SDVOC) - port = VIDEO_DIP_PORT_C; + val |= VIDEO_DIP_PORT_C; else return; - flags = intel_infoframe_index(frame); + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(frame); - val &= ~VIDEO_DIP_SELECT_MASK; + val &= ~g4x_infoframe_enable(frame); + val |= VIDEO_DIP_ENABLE; - I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); + I915_WRITE(VIDEO_DIP_CTL, val); for (i = 0; i < len; i += 4) { I915_WRITE(VIDEO_DIP_DATA, *data); data++; } - flags |= intel_infoframe_flags(frame); + val |= g4x_infoframe_enable(frame); + val &= ~VIDEO_DIP_FREQ_MASK; + val |= VIDEO_DIP_FREQ_VSYNC; - I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); + I915_WRITE(VIDEO_DIP_CTL, val); } -static void ironlake_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) +static void ibx_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = encoder->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); unsigned i, len = DIP_HEADER_SIZE + frame->len; - u32 flags, val = I915_READ(reg); + u32 val = I915_READ(reg); + val &= ~VIDEO_DIP_PORT_MASK; + switch (intel_hdmi->sdvox_reg) { + case HDMIB: + val |= VIDEO_DIP_PORT_B; + break; + case HDMIC: + val |= VIDEO_DIP_PORT_C; + break; + case HDMID: + val |= VIDEO_DIP_PORT_D; + break; + default: + return; + } + intel_wait_for_vblank(dev, intel_crtc->pipe); - flags = intel_infoframe_index(frame); + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(frame); + val &= ~g4x_infoframe_enable(frame); + val |= VIDEO_DIP_ENABLE; + + I915_WRITE(reg, val); + + for (i = 0; i < len; i += 4) { + I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + data++; + } + + val |= g4x_infoframe_enable(frame); + val &= ~VIDEO_DIP_FREQ_MASK; + val |= VIDEO_DIP_FREQ_VSYNC; + + I915_WRITE(reg, val); +} + +static void cpt_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct 
drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + unsigned i, len = DIP_HEADER_SIZE + frame->len; + u32 val = I915_READ(reg); + + intel_wait_for_vblank(dev, intel_crtc->pipe); + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(frame); - I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); + /* The DIP control register spec says that we need to update the AVI + * infoframe without clearing its enable bit */ + if (frame->type == DIP_TYPE_AVI) + val |= VIDEO_DIP_ENABLE_AVI; + else + val &= ~g4x_infoframe_enable(frame); + val |= VIDEO_DIP_ENABLE; + + I915_WRITE(reg, val); + for (i = 0; i < len; i += 4) { I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } - flags |= intel_infoframe_flags(frame); + val |= g4x_infoframe_enable(frame); + val &= ~VIDEO_DIP_FREQ_MASK; + val |= VIDEO_DIP_FREQ_VSYNC; - I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); + I915_WRITE(reg, val); } +static void vlv_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + unsigned i, len = DIP_HEADER_SIZE + frame->len; + u32 val = I915_READ(reg); + + intel_wait_for_vblank(dev, intel_crtc->pipe); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(frame); + + val &= ~g4x_infoframe_enable(frame); + val |= VIDEO_DIP_ENABLE; + + I915_WRITE(reg, val); + + for (i = 0; i < len; i += 4) { + I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + data++; + } + + val |= g4x_infoframe_enable(frame); + val &= ~VIDEO_DIP_FREQ_MASK; + val |= VIDEO_DIP_FREQ_VSYNC; + + I915_WRITE(reg, val); +} + +static void hsw_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe); + unsigned int i, len = DIP_HEADER_SIZE + frame->len; + u32 val = I915_READ(ctl_reg); + + if (data_reg == 0) + return; + + intel_wait_for_vblank(dev, intel_crtc->pipe); + + val &= ~hsw_infoframe_enable(frame); + I915_WRITE(ctl_reg, val); + + for (i = 0; i < len; i += 4) { + I915_WRITE(data_reg + i, *data); + data++; + } + + val |= hsw_infoframe_enable(frame); + I915_WRITE(ctl_reg, val); +} + static void intel_set_infoframe(struct drm_encoder *encoder, struct dip_infoframe *frame) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); if (!intel_hdmi->has_hdmi_sink) return; intel_dip_infoframe_csum(frame); intel_hdmi->write_infoframe(encoder, frame); } -static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) +void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) { struct dip_infoframe avi_if = { .type = DIP_TYPE_AVI, .ver = DIP_VERSION_AVI, .len = DIP_LEN_AVI, }; + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) + avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; + intel_set_infoframe(encoder, &avi_if); } -static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) +void 
intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) { struct dip_infoframe spd_if; memset(&spd_if, 0, sizeof(spd_if)); spd_if.type = DIP_TYPE_SPD; spd_if.ver = DIP_VERSION_SPD; spd_if.len = DIP_LEN_SPD; strcpy(spd_if.body.spd.vn, "Intel"); strcpy(spd_if.body.spd.pd, "Integrated gfx"); spd_if.body.spd.sdi = DIP_SPD_PC; intel_set_infoframe(encoder, &spd_if); } static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = encoder->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 sdvox; sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; if (!HAS_PCH_SPLIT(dev)) sdvox |= intel_hdmi->color_range; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) sdvox |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) sdvox |= SDVO_HSYNC_ACTIVE_HIGH; if (intel_crtc->bpp > 24) sdvox |= COLOR_FORMAT_12bpc; else sdvox |= COLOR_FORMAT_8bpc; /* Required on CPT */ if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) sdvox |= HDMI_MODE_SELECT; if (intel_hdmi->has_audio) { DRM_DEBUG_KMS("Enabling HDMI audio on pipe %c\n", pipe_name(intel_crtc->pipe)); sdvox |= SDVO_AUDIO_ENABLE; sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; intel_write_eld(encoder, adjusted_mode); } if (HAS_PCH_CPT(dev)) sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); else if (intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; I915_WRITE(intel_hdmi->sdvox_reg, sdvox); POSTING_READ(intel_hdmi->sdvox_reg); - intel_hdmi_set_avi_infoframe(encoder); + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; u32 enable_bits = SDVO_ENABLE; if (intel_hdmi->has_audio) enable_bits |= SDVO_AUDIO_ENABLE; temp = I915_READ(intel_hdmi->sdvox_reg); /* HW workaround, need to toggle enable bit off and on for 12bpc, but * we do this anyway which shows more stable in testing. */ if (HAS_PCH_SPLIT(dev)) { I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); POSTING_READ(intel_hdmi->sdvox_reg); } if (mode != DRM_MODE_DPMS_ON) { temp &= ~enable_bits; } else { temp |= enable_bits; } I915_WRITE(intel_hdmi->sdvox_reg, temp); POSTING_READ(intel_hdmi->sdvox_reg); /* HW workaround, need to write this twice for issue that may result * in first write getting masked. 
*/ if (HAS_PCH_SPLIT(dev)) { I915_WRITE(intel_hdmi->sdvox_reg, temp); POSTING_READ(intel_hdmi->sdvox_reg); } } static int intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { if (mode->clock > 165000) return MODE_CLOCK_HIGH; if (mode->clock < 20000) return MODE_CLOCK_LOW; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; return MODE_OK; } static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, + struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; struct edid *edid; enum drm_connector_status status = connector_status_disconnected; intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; - edid = drm_get_edid(connector, dev_priv->gmbus[intel_hdmi->ddc_bus]); + edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI) intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); intel_hdmi->has_audio = drm_detect_monitor_audio(edid); } connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } else { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] got no edid, ddc port %d\n", connector->base.id, drm_get_connector_name(connector), intel_hdmi->ddc_bus); } if (status == connector_status_connected) { if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) intel_hdmi->has_audio = (intel_hdmi->force_audio == HDMI_AUDIO_ON); } return status; } static int intel_hdmi_get_modes(struct drm_connector *connector) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; /* We should parse the EDID data and find out if it's an HDMI sink so * we can send audio to it. 
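The clock limits in intel_hdmi_mode_valid() above are in kHz (the usual DRM convention for mode->clock), so the driver accepts pixel clocks between 20 MHz and the 165 MHz single-link TMDS limit. A couple of illustrative values (assumed CEA timings, not taken from this change):

	/* 1920x1080@60 (148.5 MHz): clock = 148500 <= 165000 -> MODE_OK         */
	/* 3840x2160@30 (~297 MHz):  clock = 297000 >  165000 -> MODE_CLOCK_HIGH */
	/* anything under 20 MHz:    clock <   20000           -> MODE_CLOCK_LOW  */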
*/ return intel_ddc_get_modes(connector, - dev_priv->gmbus[intel_hdmi->ddc_bus]); + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); } static bool intel_hdmi_detect_audio(struct drm_connector *connector) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; struct edid *edid; bool has_audio = false; - edid = drm_get_edid(connector, dev_priv->gmbus[intel_hdmi->ddc_bus]); + edid = drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } return has_audio; } static int intel_hdmi_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; int ret; ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; if (property == dev_priv->force_audio_property) { enum hdmi_force_audio i = val; bool has_audio; if (i == intel_hdmi->force_audio) return 0; intel_hdmi->force_audio = i; if (i == HDMI_AUDIO_AUTO) has_audio = intel_hdmi_detect_audio(connector); else has_audio = (i == HDMI_AUDIO_ON); if (i == HDMI_AUDIO_OFF_DVI) intel_hdmi->has_hdmi_sink = 0; intel_hdmi->has_audio = has_audio; goto done; } if (property == dev_priv->broadcast_rgb_property) { if (val == !!intel_hdmi->color_range) return 0; intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; goto done; } return -EINVAL; done: if (intel_hdmi->base.base.crtc) { struct drm_crtc *crtc = intel_hdmi->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } return 0; } static void intel_hdmi_destroy(struct drm_connector *connector) { #if 0 drm_sysfs_connector_remove(connector); #endif drm_connector_cleanup(connector); free(connector, DRM_MEM_KMS); } +static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = { + .dpms = intel_ddi_dpms, + .mode_fixup = intel_hdmi_mode_fixup, + .prepare = intel_encoder_prepare, + .mode_set = intel_ddi_mode_set, + .commit = intel_encoder_commit, +}; + static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { .dpms = intel_hdmi_dpms, .mode_fixup = intel_hdmi_mode_fixup, .prepare = intel_encoder_prepare, .mode_set = intel_hdmi_mode_set, .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_hdmi_set_property, .destroy = intel_hdmi_destroy, }; static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { .destroy = intel_encoder_destroy, }; static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); } void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; struct intel_hdmi *intel_hdmi; int i; intel_hdmi = 
malloc(sizeof(struct intel_hdmi), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_encoder = &intel_hdmi->base; drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); intel_encoder->type = INTEL_OUTPUT_HDMI; connector->polled = DRM_CONNECTOR_POLL_HPD; connector->interlace_allowed = 1; connector->doublescan_allowed = 0; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); /* Set up the DDC bus. */ if (sdvox_reg == SDVOB) { intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); intel_hdmi->ddc_bus = GMBUS_PORT_DPB; dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == SDVOC) { intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); intel_hdmi->ddc_bus = GMBUS_PORT_DPC; dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIB) { intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); intel_hdmi->ddc_bus = GMBUS_PORT_DPB; dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIC) { intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); intel_hdmi->ddc_bus = GMBUS_PORT_DPC; dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMID) { intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); intel_hdmi->ddc_bus = GMBUS_PORT_DPD; dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) { + DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n"); + intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); + intel_hdmi->ddc_bus = GMBUS_PORT_DPB; + intel_hdmi->ddi_port = PORT_B; + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) { + DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n"); + intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); + intel_hdmi->ddc_bus = GMBUS_PORT_DPC; + intel_hdmi->ddi_port = PORT_C; + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) { + DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n"); + intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); + intel_hdmi->ddc_bus = GMBUS_PORT_DPD; + intel_hdmi->ddi_port = PORT_D; + dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; + } else { + /* If we got an unknown sdvox_reg, things are pretty much broken + * in a way that we should let the kernel know about it */ + DRM_DEBUG_KMS("unknown sdvox_reg %d\n", sdvox_reg); } - intel_hdmi->sdvox_reg = sdvox_reg; - if (!HAS_PCH_SPLIT(dev)) { - intel_hdmi->write_infoframe = i9xx_write_infoframe; + intel_hdmi->write_infoframe = g4x_write_infoframe; I915_WRITE(VIDEO_DIP_CTL, 0); + } else if (IS_VALLEYVIEW(dev)) { + intel_hdmi->write_infoframe = vlv_write_infoframe; + for_each_pipe(i) + I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0); + } else if (IS_HASWELL(dev)) { + /* FIXME: Haswell has a new set of DIP frame registers, but we are + * just doing the minimal required for HDMI to work at this stage. 
+ */ + intel_hdmi->write_infoframe = hsw_write_infoframe; + for_each_pipe(i) + I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0); + } else if (HAS_PCH_IBX(dev)) { + intel_hdmi->write_infoframe = ibx_write_infoframe; + for_each_pipe(i) + I915_WRITE(TVIDEO_DIP_CTL(i), 0); } else { - intel_hdmi->write_infoframe = ironlake_write_infoframe; + intel_hdmi->write_infoframe = cpt_write_infoframe; for_each_pipe(i) I915_WRITE(TVIDEO_DIP_CTL(i), 0); } - drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); + if (IS_HASWELL(dev)) + drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw); + else + drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); intel_hdmi_add_properties(intel_hdmi, connector); intel_connector_attach_encoder(intel_connector, intel_encoder); #if 0 drm_sysfs_connector_add(connector); #endif /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. */ if (IS_G4X(dev) && !IS_GM45(dev)) { u32 temp = I915_READ(PEG_BAND_GAP_DATA); I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } } Index: head/sys/dev/drm2/i915/intel_iic.c =================================================================== --- head/sys/dev/drm2/i915/intel_iic.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_iic.c (revision 277487) @@ -1,710 +1,810 @@ /* * Copyright (c) 2006 Dave Airlie * Copyright © 2006-2008,2010 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt * Chris Wilson * * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include "iicbb_if.h" -static int intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs); static void intel_teardown_gmbus_m(struct drm_device *dev, int m); +struct gmbus_port { + const char *name; + int reg; +}; + +static const struct gmbus_port gmbus_ports[] = { + { "ssc", GPIOB }, + { "vga", GPIOA }, + { "panel", GPIOC }, + { "dpc", GPIOD }, + { "dpb", GPIOE }, + { "dpd", GPIOF }, +}; + /* Intel GPIO access functions */ #define I2C_RISEFALL_TIME 10 struct intel_iic_softc { struct drm_device *drm_dev; device_t iic_dev; bool force_bit_dev; char name[32]; uint32_t reg; uint32_t reg0; }; static void intel_iic_quirk_set(struct drm_i915_private *dev_priv, bool enable) { u32 val; /* When using bit bashing for I2C, this bit needs to be set to 1 */ if (!IS_PINEVIEW(dev_priv->dev)) return; val = I915_READ(DSPCLK_GATE_D); if (enable) val |= DPCUNIT_CLOCK_GATE_DISABLE; else val &= ~DPCUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, val); } static u32 intel_iic_get_reserved(device_t idev) { struct intel_iic_softc *sc; struct drm_device *dev; struct drm_i915_private *dev_priv; u32 reserved; sc = device_get_softc(idev); dev = sc->drm_dev; dev_priv = dev->dev_private; if (!IS_I830(dev) && !IS_845G(dev)) { reserved = I915_READ_NOTRACE(sc->reg) & (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); } else { reserved = 0; } return (reserved); } void intel_iic_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv; dev_priv = dev->dev_private; - if (HAS_PCH_SPLIT(dev)) - I915_WRITE(PCH_GMBUS0, 0); - else - I915_WRITE(GMBUS0, 0); + I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); } static int intel_iicbus_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr) { struct intel_iic_softc *sc; struct drm_device *dev; sc = device_get_softc(idev); dev = sc->drm_dev; intel_iic_reset(dev); return (0); } static void intel_iicbb_setsda(device_t idev, int val) { struct intel_iic_softc *sc; struct drm_i915_private *dev_priv; u32 reserved; u32 data_bits; sc = device_get_softc(idev); dev_priv = sc->drm_dev->dev_private; reserved = intel_iic_get_reserved(idev); if (val) data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; else data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; I915_WRITE_NOTRACE(sc->reg, reserved | data_bits); POSTING_READ(sc->reg); } static void intel_iicbb_setscl(device_t idev, int val) { struct intel_iic_softc *sc; struct drm_i915_private *dev_priv; u32 clock_bits, reserved; sc = device_get_softc(idev); dev_priv = sc->drm_dev->dev_private; reserved = intel_iic_get_reserved(idev); if (val) clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; else clock_bits = 
GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; I915_WRITE_NOTRACE(sc->reg, reserved | clock_bits); POSTING_READ(sc->reg); } static int intel_iicbb_getsda(device_t idev) { struct intel_iic_softc *sc; struct drm_i915_private *dev_priv; u32 reserved; sc = device_get_softc(idev); dev_priv = sc->drm_dev->dev_private; reserved = intel_iic_get_reserved(idev); I915_WRITE_NOTRACE(sc->reg, reserved | GPIO_DATA_DIR_MASK); I915_WRITE_NOTRACE(sc->reg, reserved); return ((I915_READ_NOTRACE(sc->reg) & GPIO_DATA_VAL_IN) != 0); } static int intel_iicbb_getscl(device_t idev) { struct intel_iic_softc *sc; struct drm_i915_private *dev_priv; u32 reserved; sc = device_get_softc(idev); dev_priv = sc->drm_dev->dev_private; reserved = intel_iic_get_reserved(idev); I915_WRITE_NOTRACE(sc->reg, reserved | GPIO_CLOCK_DIR_MASK); I915_WRITE_NOTRACE(sc->reg, reserved); return ((I915_READ_NOTRACE(sc->reg) & GPIO_CLOCK_VAL_IN) != 0); } static int +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct iic_msg *msg, + u32 gmbus1_index) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u16 len = msg->len; + u8 *buf = msg->buf; + + I915_WRITE(GMBUS1 + reg_offset, + gmbus1_index | + GMBUS_CYCLE_WAIT | + (len << GMBUS_BYTE_COUNT_SHIFT) | + (msg->slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) | + GMBUS_SLAVE_READ | GMBUS_SW_RDY); + while (len) { + int ret; + u32 val, loop = 0; + u32 gmbus2; + + ret = _intel_wait_for(sc->drm_dev, + ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & + (GMBUS_SATOER | GMBUS_HW_RDY)), + 50, 1, "915gbr"); + if (ret) + return (ETIMEDOUT); + if (gmbus2 & GMBUS_SATOER) + return (ENXIO); + + val = I915_READ(GMBUS3 + reg_offset); + do { + *buf++ = val & 0xff; + val >>= 8; + } while (--len != 0 && ++loop < 4); + } + + return 0; +} + +static int +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct iic_msg *msg) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u16 len = msg->len; + u8 *buf = msg->buf; + u32 val, loop; + + val = loop = 0; + while (len && loop < 4) { + val |= *buf++ << (8 * loop++); + len -= 1; + } + + I915_WRITE(GMBUS3 + reg_offset, val); + I915_WRITE(GMBUS1 + reg_offset, + GMBUS_CYCLE_WAIT | + (msg->len << GMBUS_BYTE_COUNT_SHIFT) | + (msg->slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) | + GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); + while (len) { + int ret; + u32 gmbus2; + + val = loop = 0; + do { + val |= *buf++ << (8 * loop); + } while (--len != 0 && ++loop < 4); + + I915_WRITE(GMBUS3 + reg_offset, val); + + ret = _intel_wait_for(sc->drm_dev, + ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & + (GMBUS_SATOER | GMBUS_HW_RDY)), + 50, 1, "915gbw"); + if (ret) + return (ETIMEDOUT); + if (gmbus2 & GMBUS_SATOER) + return (ENXIO); + } + return 0; +} + +/* + * The gmbus controller can combine a 1 or 2 byte write with a read that + * immediately follows it by using an "INDEX" cycle. 
+ */ +static bool +gmbus_is_index_read(struct iic_msg *msgs, int i, int num) +{ + return (i + 1 < num && + !(msgs[i].flags & IIC_M_RD) && msgs[i].len <= 2 && + (msgs[i + 1].flags & IIC_M_RD)); +} + +static int +gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct iic_msg *msgs) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u32 gmbus1_index = 0; + u32 gmbus5 = 0; + int ret; + + if (msgs[0].len == 2) + gmbus5 = GMBUS_2BYTE_INDEX_EN | + msgs[0].buf[1] | (msgs[0].buf[0] << 8); + if (msgs[0].len == 1) + gmbus1_index = GMBUS_CYCLE_INDEX | + (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT); + + /* GMBUS5 holds 16-bit index */ + if (gmbus5) + I915_WRITE(GMBUS5 + reg_offset, gmbus5); + + ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + + /* Clear GMBUS5 after each index transfer */ + if (gmbus5) + I915_WRITE(GMBUS5 + reg_offset, 0); + + return ret; +} + +static int intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs) { struct intel_iic_softc *sc; struct drm_i915_private *dev_priv; - u8 *buf; - int error, i, reg_offset, unit; - u32 val, loop; - u16 len; + int error, i, ret, reg_offset, unit; + error = 0; sc = device_get_softc(idev); dev_priv = sc->drm_dev->dev_private; unit = device_get_unit(idev); sx_xlock(&dev_priv->gmbus_sx); if (sc->force_bit_dev) { - error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs); + error = IICBUS_TRANSFER(dev_priv->bbbus[unit], msgs, nmsgs); goto out; } - reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; + reg_offset = dev_priv->gpio_mmio_base; I915_WRITE(GMBUS0 + reg_offset, sc->reg0); for (i = 0; i < nmsgs; i++) { - len = msgs[i].len; - buf = msgs[i].buf; + u32 gmbus2; - if ((msgs[i].flags & IIC_M_RD) != 0) { - I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT | - (i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) | - (len << GMBUS_BYTE_COUNT_SHIFT) | - (msgs[i].slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) | - GMBUS_SLAVE_READ | GMBUS_SW_RDY); - POSTING_READ(GMBUS2 + reg_offset); - do { - loop = 0; - - if (_intel_wait_for(sc->drm_dev, - (I915_READ(GMBUS2 + reg_offset) & - (GMBUS_SATOER | GMBUS_HW_RDY)) != 0, - 50, 1, "915gbr")) - goto timeout; - if ((I915_READ(GMBUS2 + reg_offset) & - GMBUS_SATOER) != 0) - goto clear_err; - - val = I915_READ(GMBUS3 + reg_offset); - do { - *buf++ = val & 0xff; - val >>= 8; - } while (--len != 0 && ++loop < 4); - } while (len != 0); + if (gmbus_is_index_read(msgs, i, nmsgs)) { + error = gmbus_xfer_index_read(dev_priv, &msgs[i]); + i += 1; /* set i to the index of the read xfer */ + } else if (msgs[i].flags & IIC_M_RD) { + error = gmbus_xfer_read(dev_priv, &msgs[i], 0); } else { - val = loop = 0; - do { - val |= *buf++ << (8 * loop); - } while (--len != 0 && ++loop < 4); - - I915_WRITE(GMBUS3 + reg_offset, val); - I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT | - (i + 1 == nmsgs ? 
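The index-read support above covers the most common DDC pattern: a one- or two-byte write of a register offset immediately followed by a read from the same slave. Under that pattern a message pair such as the following (a hypothetical EDID fetch in FreeBSD iic_msg form, not code from this change) is collapsed into a single GMBUS INDEX cycle instead of two separate bus transactions:

	uint8_t offset = 0;		/* start of the EDID block */
	uint8_t edid[128];
	struct iic_msg msgs[2] = {
		{ .slave = 0x50 << 1, .flags = IIC_M_WR, .len = 1,
		  .buf = &offset },
		{ .slave = 0x50 << 1, .flags = IIC_M_RD, .len = sizeof(edid),
		  .buf = edid },
	};

	/*
	 * gmbus_is_index_read(msgs, 0, 2) is true (a write of <= 2 bytes
	 * followed by a read), so intel_gmbus_transfer() calls
	 * gmbus_xfer_index_read() and advances i past the read message.
	 */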
GMBUS_CYCLE_STOP : 0) | - (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | - (msgs[i].slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) | - GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); - POSTING_READ(GMBUS2+reg_offset); - - while (len != 0) { - if (_intel_wait_for(sc->drm_dev, - (I915_READ(GMBUS2 + reg_offset) & - (GMBUS_SATOER | GMBUS_HW_RDY)) != 0, - 50, 1, "915gbw")) - goto timeout; - if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) - goto clear_err; - - val = loop = 0; - do { - val |= *buf++ << (8 * loop); - } while (--len != 0 && ++loop < 4); - - I915_WRITE(GMBUS3 + reg_offset, val); - POSTING_READ(GMBUS2 + reg_offset); - } + error = gmbus_xfer_write(dev_priv, &msgs[i]); } - if (i + 1 < nmsgs && _intel_wait_for(sc->drm_dev, - (I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | - GMBUS_HW_WAIT_PHASE)) != 0, - 50, 1, "915gbh")) + if (error == ETIMEDOUT) goto timeout; - if ((I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) != 0) + if (error == ENXIO) goto clear_err; + + ret = _intel_wait_for(sc->drm_dev, + ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & + (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE)), + 50, 1, "915gbh"); + if (ret) + goto timeout; + if (gmbus2 & GMBUS_SATOER) + goto clear_err; } - error = 0; -done: + /* Generate a STOP condition on the bus. Note that gmbus can't generata + * a STOP on the very first cycle. To simplify the code we + * unconditionally generate the STOP condition with an additional gmbus + * cycle. */ + I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); + /* Mark the GMBUS interface as disabled after waiting for idle. * We will re-enable it at the start of the next xfer, * till then let it sleep. */ if (_intel_wait_for(dev, (I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, - 10, 1, "915gbu")) - DRM_INFO("GMBUS timed out waiting for idle\n"); + 10, 1, "915gbu")) { + DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n", + sc->name); + error = ETIMEDOUT; + } I915_WRITE(GMBUS0 + reg_offset, 0); -out: - sx_xunlock(&dev_priv->gmbus_sx); - return (error); + goto out; clear_err: + /* + * Wait for bus to IDLE before clearing NAK. + * If we clear the NAK while bus is still active, then it will stay + * active and the next transaction may fail. + */ + if (_intel_wait_for(dev, + (I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, + 10, 1, "915gbu")) + DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", sc->name); + /* Toggle the Software Clear Interrupt bit. This has the effect * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the slave's NAK. */ I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); I915_WRITE(GMBUS1 + reg_offset, 0); - error = EIO; - goto done; + I915_WRITE(GMBUS0 + reg_offset, 0); + DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n", + sc->name, msgs[i].slave, + (msgs[i].flags & IIC_M_RD) ? 'r' : 'w', msgs[i].len); + + /* + * If no ACK is received during the address phase of a transaction, + * the adapter must report -ENXIO. + * It is not clear what to return if no ACK is received at other times. + * So, we always return -ENXIO in all NAK cases, to ensure we send + * it at least during the one case that is specified. + */ + error = ENXIO; + goto out; + timeout: - DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", - sc->reg0 & 0xff, sc->name); + DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", + sc->name, sc->reg0 & 0xff); I915_WRITE(GMBUS0 + reg_offset, 0); /* * Hardware may not support GMBUS over these pins? * Try GPIO bitbanging instead. 
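The reworked completion paths above give intel_gmbus_transfer() a defined error contract: ENXIO when the slave NAKs (after resetting the controller with GMBUS_SW_CLR_INT), and ETIMEDOUT when the hardware never signals ready or idle, in which case the driver sets force_bit_dev and re-submits the messages so they go out over GPIO bit-banging instead. A caller-side view (illustrative only, not code from this change):

	error = IICBUS_TRANSFER(adapter, msgs, nmsgs);
	if (error == ENXIO) {
		/* Slave NAKed; typically nothing is connected on this port. */
	} else if (error == ETIMEDOUT) {
		/* GMBUS hung; later transfers will use GPIO bit-banging. */
	}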
*/ sc->force_bit_dev = true; - - error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs); + error = IICBUS_TRANSFER(idev, msgs, nmsgs); goto out; + +out: + sx_xunlock(&dev_priv->gmbus_sx); + return (error); } +device_t +intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, + unsigned port) +{ + + if (!intel_gmbus_is_port_valid(port)) + DRM_ERROR("GMBUS get adapter %d: invalid port\n", port); + return (intel_gmbus_is_port_valid(port) ? dev_priv->gmbus[port - 1] : + NULL); +} + void intel_gmbus_set_speed(device_t idev, int speed) { struct intel_iic_softc *sc; sc = device_get_softc(device_get_parent(idev)); sc->reg0 = (sc->reg0 & ~(0x3 << 8)) | speed; } void intel_gmbus_force_bit(device_t idev, bool force_bit) { struct intel_iic_softc *sc; sc = device_get_softc(device_get_parent(idev)); sc->force_bit_dev = force_bit; } static int -intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs) +intel_iicbb_pre_xfer(device_t idev) { - device_t bridge_dev; struct intel_iic_softc *sc; struct drm_i915_private *dev_priv; - int ret; - int i; - bridge_dev = device_get_parent(device_get_parent(idev)); - sc = device_get_softc(bridge_dev); + sc = device_get_softc(idev); dev_priv = sc->drm_dev->dev_private; intel_iic_reset(sc->drm_dev); intel_iic_quirk_set(dev_priv, true); - IICBB_SETSDA(bridge_dev, 1); - IICBB_SETSCL(bridge_dev, 1); + IICBB_SETSDA(idev, 1); + IICBB_SETSCL(idev, 1); DELAY(I2C_RISEFALL_TIME); + return (0); +} - for (i = 0; i < nmsgs - 1; i++) { - /* force use of repeated start instead of default stop+start */ - msgs[i].flags |= IIC_M_NOSTOP; - } - ret = iicbus_transfer(idev, msgs, nmsgs); - IICBB_SETSDA(bridge_dev, 1); - IICBB_SETSCL(bridge_dev, 1); - intel_iic_quirk_set(dev_priv, false); +static void +intel_iicbb_post_xfer(device_t idev) +{ + struct intel_iic_softc *sc; + struct drm_i915_private *dev_priv; - return (ret); + sc = device_get_softc(idev); + dev_priv = sc->drm_dev->dev_private; + + IICBB_SETSDA(idev, 1); + IICBB_SETSCL(idev, 1); + intel_iic_quirk_set(dev_priv, false); } -static const char *gpio_names[GMBUS_NUM_PORTS] = { - "disabled", - "ssc", - "vga", - "panel", - "dpc", - "dpb", - "reserved", - "dpd", -}; - static int intel_gmbus_probe(device_t dev) { return (BUS_PROBE_SPECIFIC); } static int intel_gmbus_attach(device_t idev) { struct drm_i915_private *dev_priv; struct intel_iic_softc *sc; - int pin; + int pin, port; sc = device_get_softc(idev); sc->drm_dev = device_get_softc(device_get_parent(idev)); dev_priv = sc->drm_dev->dev_private; pin = device_get_unit(idev); + port = pin + 1; - snprintf(sc->name, sizeof(sc->name), "gmbus bus %s", gpio_names[pin]); + snprintf(sc->name, sizeof(sc->name), "gmbus %s", gmbus_ports[pin].name); device_set_desc(idev, sc->name); /* By default use a conservative clock rate */ - sc->reg0 = pin | GMBUS_RATE_100KHZ; + sc->reg0 = port | GMBUS_RATE_100KHZ; - /* XXX force bit banging until GMBUS is fully debugged */ + /* gmbus seems to be broken on i830 */ + if (IS_I830(sc->drm_dev)) + sc->force_bit_dev = true; +#if 0 if (IS_GEN2(sc->drm_dev)) { sc->force_bit_dev = true; } +#endif /* add bus interface device */ sc->iic_dev = device_add_child(idev, "iicbus", -1); if (sc->iic_dev == NULL) return (ENXIO); device_quiet(sc->iic_dev); bus_generic_attach(idev); return (0); } static int intel_gmbus_detach(device_t idev) { struct intel_iic_softc *sc; struct drm_i915_private *dev_priv; device_t child; int u; sc = device_get_softc(idev); u = device_get_unit(idev); dev_priv = sc->drm_dev->dev_private; child = sc->iic_dev; 
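intel_gmbus_get_adapter() above is the accessor that the connector code now uses instead of indexing dev_priv->gmbus[] directly; GMBUS port numbers are 1-based, so the array index is port - 1 and an invalid port yields NULL. A typical call site, in the style of the intel_hdmi.c hunk earlier in this revision (the defensive NULL check is added here for illustration only):

	device_t adapter;
	struct edid *edid = NULL;

	adapter = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
	if (adapter != NULL)
		edid = drm_get_edid(connector, adapter);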
bus_generic_detach(idev); if (child != NULL) device_delete_child(idev, child); return (0); } static int intel_iicbb_probe(device_t dev) { return (BUS_PROBE_DEFAULT); } static int intel_iicbb_attach(device_t idev) { - static const int map_pin_to_reg[] = { - 0, - GPIOB, - GPIOA, - GPIOC, - GPIOD, - GPIOE, - 0, - GPIOF - }; - struct intel_iic_softc *sc; struct drm_i915_private *dev_priv; int pin; sc = device_get_softc(idev); sc->drm_dev = device_get_softc(device_get_parent(idev)); dev_priv = sc->drm_dev->dev_private; pin = device_get_unit(idev); - snprintf(sc->name, sizeof(sc->name), "i915 iicbb %s", gpio_names[pin]); + snprintf(sc->name, sizeof(sc->name), "i915 iicbb %s", + gmbus_ports[pin].name); device_set_desc(idev, sc->name); sc->reg0 = pin | GMBUS_RATE_100KHZ; - sc->reg = map_pin_to_reg[pin]; - if (HAS_PCH_SPLIT(dev_priv->dev)) - sc->reg += PCH_GPIOA - GPIOA; + sc->reg = dev_priv->gpio_mmio_base + gmbus_ports[pin].reg; /* add generic bit-banging code */ sc->iic_dev = device_add_child(idev, "iicbb", -1); if (sc->iic_dev == NULL) return (ENXIO); device_quiet(sc->iic_dev); bus_generic_attach(idev); + iicbus_set_nostop(idev, true); return (0); } static int intel_iicbb_detach(device_t idev) { struct intel_iic_softc *sc; device_t child; sc = device_get_softc(idev); child = sc->iic_dev; bus_generic_detach(idev); if (child) device_delete_child(idev, child); return (0); } static device_method_t intel_gmbus_methods[] = { DEVMETHOD(device_probe, intel_gmbus_probe), DEVMETHOD(device_attach, intel_gmbus_attach), DEVMETHOD(device_detach, intel_gmbus_detach), DEVMETHOD(iicbus_reset, intel_iicbus_reset), DEVMETHOD(iicbus_transfer, intel_gmbus_transfer), DEVMETHOD_END }; static driver_t intel_gmbus_driver = { "intel_gmbus", intel_gmbus_methods, sizeof(struct intel_iic_softc) }; static devclass_t intel_gmbus_devclass; DRIVER_MODULE_ORDERED(intel_gmbus, drmn, intel_gmbus_driver, intel_gmbus_devclass, 0, 0, SI_ORDER_FIRST); DRIVER_MODULE(iicbus, intel_gmbus, iicbus_driver, iicbus_devclass, 0, 0); static device_method_t intel_iicbb_methods[] = { DEVMETHOD(device_probe, intel_iicbb_probe), DEVMETHOD(device_attach, intel_iicbb_attach), DEVMETHOD(device_detach, intel_iicbb_detach), DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(iicbb_callback, iicbus_null_callback), DEVMETHOD(iicbb_reset, intel_iicbus_reset), DEVMETHOD(iicbb_setsda, intel_iicbb_setsda), DEVMETHOD(iicbb_setscl, intel_iicbb_setscl), DEVMETHOD(iicbb_getsda, intel_iicbb_getsda), DEVMETHOD(iicbb_getscl, intel_iicbb_getscl), + DEVMETHOD(iicbb_pre_xfer, intel_iicbb_pre_xfer), + DEVMETHOD(iicbb_post_xfer, intel_iicbb_post_xfer), DEVMETHOD_END }; static driver_t intel_iicbb_driver = { "intel_iicbb", intel_iicbb_methods, sizeof(struct intel_iic_softc) }; static devclass_t intel_iicbb_devclass; DRIVER_MODULE_ORDERED(intel_iicbb, drmn, intel_iicbb_driver, intel_iicbb_devclass, 0, 0, SI_ORDER_FIRST); DRIVER_MODULE(iicbb, intel_iicbb, iicbb_driver, iicbb_devclass, 0, 0); int intel_setup_gmbus(struct drm_device *dev) { struct drm_i915_private *dev_priv; device_t iic_dev; int i, ret; dev_priv = dev->dev_private; sx_init(&dev_priv->gmbus_sx, "gmbus"); - dev_priv->gmbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS, - DRM_MEM_DRIVER, M_WAITOK | M_ZERO); - dev_priv->bbbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS, - DRM_MEM_DRIVER, M_WAITOK | M_ZERO); - dev_priv->gmbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS, - DRM_MEM_DRIVER, M_WAITOK | M_ZERO); - dev_priv->bbbus = 
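The quirk handling that used to live in intel_iic_quirk_xfer() is now split between the iicbb_pre_xfer/iicbb_post_xfer methods registered above and the iicbus_set_nostop() call in intel_iicbb_attach(), so the generic iicbb(4) code drives the sequence instead of the driver open-coding it. Roughly, and sketched here rather than taken from the change:

	/*
	 * For each bit-banged transfer the framework is expected to do:
	 *
	 *   intel_iicbb_pre_xfer(dev);   - reset GMBUS, apply the Pineview
	 *                                  DPCUNIT clock-gating quirk,
	 *                                  release SDA and SCL
	 *   ...bit-bang the messages via the setsda/setscl and getsda/getscl
	 *      methods, using repeated starts between messages because the
	 *      bus was attached with iicbus_set_nostop(idev, true)...
	 *   intel_iicbb_post_xfer(dev);  - release SDA and SCL again and
	 *                                  undo the clock-gating quirk
	 */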
malloc(sizeof(device_t) * GMBUS_NUM_PORTS, - DRM_MEM_DRIVER, M_WAITOK | M_ZERO); + if (HAS_PCH_SPLIT(dev)) + dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; + else + dev_priv->gpio_mmio_base = 0; /* * The Giant there is recursed, most likely. Normally, the * intel_setup_gmbus() is called from the attach method of the * driver. */ mtx_lock(&Giant); - for (i = 0; i < GMBUS_NUM_PORTS; i++) { + for (i = 0; i <= GMBUS_NUM_PORTS; i++) { /* * Initialized bbbus_bridge before gmbus_bridge, since * gmbus may decide to force quirk transfer in the * attachment code. */ dev_priv->bbbus_bridge[i] = device_add_child(dev->device, "intel_iicbb", i); if (dev_priv->bbbus_bridge[i] == NULL) { DRM_ERROR("bbbus bridge %d creation failed\n", i); ret = ENXIO; goto err; } device_quiet(dev_priv->bbbus_bridge[i]); ret = device_probe_and_attach(dev_priv->bbbus_bridge[i]); if (ret != 0) { DRM_ERROR("bbbus bridge %d attach failed, %d\n", i, ret); goto err; } iic_dev = device_find_child(dev_priv->bbbus_bridge[i], "iicbb", -1); if (iic_dev == NULL) { DRM_ERROR("bbbus bridge doesn't have iicbb child\n"); goto err; } iic_dev = device_find_child(iic_dev, "iicbus", -1); if (iic_dev == NULL) { DRM_ERROR( "bbbus bridge doesn't have iicbus grandchild\n"); goto err; } dev_priv->bbbus[i] = iic_dev; dev_priv->gmbus_bridge[i] = device_add_child(dev->device, "intel_gmbus", i); if (dev_priv->gmbus_bridge[i] == NULL) { DRM_ERROR("gmbus bridge %d creation failed\n", i); ret = ENXIO; goto err; } device_quiet(dev_priv->gmbus_bridge[i]); ret = device_probe_and_attach(dev_priv->gmbus_bridge[i]); if (ret != 0) { DRM_ERROR("gmbus bridge %d attach failed, %d\n", i, ret); ret = ENXIO; goto err; } iic_dev = device_find_child(dev_priv->gmbus_bridge[i], "iicbus", -1); if (iic_dev == NULL) { DRM_ERROR("gmbus bridge doesn't have iicbus child\n"); goto err; } dev_priv->gmbus[i] = iic_dev; intel_iic_reset(dev); } mtx_unlock(&Giant); return (0); err: intel_teardown_gmbus_m(dev, i); mtx_unlock(&Giant); return (ret); } static void intel_teardown_gmbus_m(struct drm_device *dev, int m) { struct drm_i915_private *dev_priv; dev_priv = dev->dev_private; - free(dev_priv->gmbus, DRM_MEM_DRIVER); - dev_priv->gmbus = NULL; - free(dev_priv->bbbus, DRM_MEM_DRIVER); - dev_priv->bbbus = NULL; - free(dev_priv->gmbus_bridge, DRM_MEM_DRIVER); - dev_priv->gmbus_bridge = NULL; - free(dev_priv->bbbus_bridge, DRM_MEM_DRIVER); - dev_priv->bbbus_bridge = NULL; sx_destroy(&dev_priv->gmbus_sx); } void intel_teardown_gmbus(struct drm_device *dev) { mtx_lock(&Giant); intel_teardown_gmbus_m(dev, GMBUS_NUM_PORTS); mtx_unlock(&Giant); } Index: head/sys/dev/drm2/i915/intel_lvds.c =================================================================== --- head/sys/dev/drm2/i915/intel_lvds.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_lvds.c (revision 277487) @@ -1,1125 +1,1126 @@ /* * Copyright © 2006-2007 Intel Corporation * Copyright (c) 2006 Dave Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt * Dave Airlie * Jesse Barnes */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include /* Private structure for the integrated LVDS support */ struct intel_lvds { struct intel_encoder base; struct edid *edid; int fitting_mode; u32 pfit_control; u32 pfit_pgm_ratios; bool pfit_dirty; struct drm_display_mode *fixed_mode; }; static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) { return container_of(encoder, struct intel_lvds, base.base); } static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_lvds, base); } /** * Sets the power state for the panel. */ static void intel_lvds_enable(struct intel_lvds *intel_lvds) { struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; lvds_reg = PCH_LVDS; stat_reg = PCH_PP_STATUS; } else { ctl_reg = PP_CONTROL; lvds_reg = LVDS; stat_reg = PP_STATUS; } I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); if (intel_lvds->pfit_dirty) { /* * Enable automatic panel scaling so that non-native modes * fill the screen. The panel fitter should only be * adjusted whilst the pipe is disabled, according to * register description and PRM. */ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", intel_lvds->pfit_control, intel_lvds->pfit_pgm_ratios); I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); intel_lvds->pfit_dirty = false; } I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); POSTING_READ(lvds_reg); if (_intel_wait_for(dev, (I915_READ(stat_reg) & PP_ON) == 0, 1000, 1, "915lvds")) DRM_ERROR("timed out waiting for panel to power off\n"); intel_panel_enable_backlight(dev); } static void intel_lvds_disable(struct intel_lvds *intel_lvds) { struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; lvds_reg = PCH_LVDS; stat_reg = PCH_PP_STATUS; } else { ctl_reg = PP_CONTROL; lvds_reg = LVDS; stat_reg = PP_STATUS; } intel_panel_disable_backlight(dev); I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); if (_intel_wait_for(dev, (I915_READ(stat_reg) & PP_ON) == 0, 1000, 1, "915lvo")) DRM_ERROR("timed out waiting for panel to power off\n"); if (intel_lvds->pfit_control) { I915_WRITE(PFIT_CONTROL, 0); intel_lvds->pfit_dirty = true; } I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); POSTING_READ(lvds_reg); } static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) { struct intel_lvds *intel_lvds = to_intel_lvds(encoder); if (mode == DRM_MODE_DPMS_ON) intel_lvds_enable(intel_lvds); else intel_lvds_disable(intel_lvds); /* XXX: We never power down the LVDS pairs. 
*/ } static int intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > fixed_mode->vdisplay) return MODE_PANEL; return MODE_OK; } static void centre_horizontally(struct drm_display_mode *mode, int width) { u32 border, sync_pos, blank_width, sync_width; /* keep the hsync and hblank widths constant */ sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start; blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start; sync_pos = (blank_width - sync_width + 1) / 2; border = (mode->hdisplay - width + 1) / 2; border += border & 1; /* make the border even */ mode->crtc_hdisplay = width; mode->crtc_hblank_start = width + border; mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width; mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET; } static void centre_vertically(struct drm_display_mode *mode, int height) { u32 border, sync_pos, blank_width, sync_width; /* keep the vsync and vblank widths constant */ sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start; blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start; sync_pos = (blank_width - sync_width + 1) / 2; border = (mode->vdisplay - height + 1) / 2; mode->crtc_vdisplay = height; mode->crtc_vblank_start = height + border; mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width; mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET; } static inline u32 panel_fitter_scaling(u32 source, u32 target) { /* * Floating point operation is not supported. So the FACTOR * is defined, which can avoid the floating point computation * when calculating the panel ratio. */ #define ACCURACY 12 #define FACTOR (1 << ACCURACY) u32 ratio = source * FACTOR / target; return (FACTOR * ratio + FACTOR/2) / FACTOR; } static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, + struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_lvds *intel_lvds = to_intel_lvds(encoder); struct drm_encoder *tmp_encoder; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; int pipe; /* Should never happen!! */ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { DRM_ERROR("Can't support LVDS on pipe A\n"); return false; } /* Should never happen!! */ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { DRM_ERROR("Can't enable LVDS and another " "encoder on the same pipe\n"); return false; } } /* * We have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
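panel_fitter_scaling() above sidesteps floating point by computing the ratio in 12-bit fixed point (FACTOR = 1 << 12 = 4096); the value later shifted into PFIT_PGM_RATIOS is therefore source/target scaled by 4096. A worked example with illustrative numbers (not taken from the change):

	/*
	 * Fit an 800-pixel requested size onto a 1024-pixel panel axis:
	 *   ratio = 800 * 4096 / 1024     = 3200
	 *   (4096 * 3200 + 2048) / 4096   = 3200
	 * i.e. 3200/4096 = 0.78125, the scale factor as a .12 fixed-point
	 * fraction that the pre-965 path programs into PFIT_PGM_RATIOS.
	 */
	u32 bits = panel_fitter_scaling(800, 1024);	/* == 3200 */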
*/ intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); if (HAS_PCH_SPLIT(dev)) { intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, mode, adjusted_mode); return true; } /* Native modes don't need fitting */ if (adjusted_mode->hdisplay == mode->hdisplay && adjusted_mode->vdisplay == mode->vdisplay) goto out; /* 965+ wants fuzzy fitting */ if (INTEL_INFO(dev)->gen >= 4) pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | PFIT_FILTER_FUZZY); /* * Enable automatic panel scaling for non-native modes so that they fill * the screen. Should be enabled before the pipe is enabled, according * to register description and PRM. * Change the value here to see the borders for debugging */ for_each_pipe(pipe) I915_WRITE(BCLRPAT(pipe), 0); drm_mode_set_crtcinfo(adjusted_mode, 0); switch (intel_lvds->fitting_mode) { case DRM_MODE_SCALE_CENTER: /* * For centered modes, we have to calculate border widths & * heights and modify the values programmed into the CRTC. */ centre_horizontally(adjusted_mode, mode->hdisplay); centre_vertically(adjusted_mode, mode->vdisplay); border = LVDS_BORDER_ENABLE; break; case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ if (INTEL_INFO(dev)->gen >= 4) { u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; /* 965+ is easy, it does everything in hw */ if (scaled_width > scaled_height) pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR; else if (scaled_width < scaled_height) pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER; else if (adjusted_mode->hdisplay != mode->hdisplay) pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; } else { u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; /* * For earlier chips we have to calculate the scaling * ratio by hand and program it into the * PFIT_PGM_RATIO register */ if (scaled_width > scaled_height) { /* pillar */ centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay); border = LVDS_BORDER_ENABLE; if (mode->vdisplay != adjusted_mode->vdisplay) { u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay); pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | bits << PFIT_VERT_SCALE_SHIFT); pfit_control |= (PFIT_ENABLE | VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); } } else if (scaled_width < scaled_height) { /* letter */ centre_vertically(adjusted_mode, scaled_width / mode->hdisplay); border = LVDS_BORDER_ENABLE; if (mode->hdisplay != adjusted_mode->hdisplay) { u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay); pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | bits << PFIT_VERT_SCALE_SHIFT); pfit_control |= (PFIT_ENABLE | VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); } } else /* Aspects match, Let hw scale both directions */ pfit_control |= (PFIT_ENABLE | VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); } break; case DRM_MODE_SCALE_FULLSCREEN: /* * Full scaling, even if it changes the aspect ratio. * Fortunately this is all done for us in hw. */ if (mode->vdisplay != adjusted_mode->vdisplay || mode->hdisplay != adjusted_mode->hdisplay) { pfit_control |= PFIT_ENABLE; if (INTEL_INFO(dev)->gen >= 4) pfit_control |= PFIT_SCALING_AUTO; else pfit_control |= (VERT_AUTO_SCALE | VERT_INTERP_BILINEAR | HORIZ_AUTO_SCALE | HORIZ_INTERP_BILINEAR); } break; default: break; } out: /* If not enabling scaling, be consistent and always use 0. 
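For DRM_MODE_SCALE_CENTER above, centre_horizontally() and centre_vertically() only move the blanking and sync windows so the smaller mode sits in the middle of the panel timing. The arithmetic for one axis, with illustrative numbers (not taken from the change):

	/*
	 * Requested 1280 pixels on a 1400-pixel wide panel timing:
	 *   border            = (1400 - 1280 + 1) / 2 = 60  (already even)
	 *   crtc_hdisplay     = 1280
	 *   crtc_hblank_start = 1280 + 60 = 1340
	 * with the original hblank and hsync widths re-applied on top, and
	 * LVDS_BORDER_ENABLE set by the caller so the hardware fills the
	 * border area around the centred image.
	 */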
*/ if ((pfit_control & PFIT_ENABLE) == 0) { pfit_control = 0; pfit_pgm_ratios = 0; } /* Make sure pre-965 set dither correctly */ if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; if (pfit_control != intel_lvds->pfit_control || pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { intel_lvds->pfit_control = pfit_control; intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; intel_lvds->pfit_dirty = true; } dev_priv->lvds_border_bits = border; /* * XXX: It would be nice to support lower refresh rates on the * panels to reduce power consumption, and perhaps match the * user's requested refresh rate. */ return true; } static void intel_lvds_prepare(struct drm_encoder *encoder) { struct intel_lvds *intel_lvds = to_intel_lvds(encoder); /* * Prior to Ironlake, we must disable the pipe if we want to adjust * the panel fitter. However at all other times we can just reset * the registers regardless. */ if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty) intel_lvds_disable(intel_lvds); } static void intel_lvds_commit(struct drm_encoder *encoder) { struct intel_lvds *intel_lvds = to_intel_lvds(encoder); /* Always do a full power on as we do not know what state * we were left in. */ intel_lvds_enable(intel_lvds); } static void intel_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* * The LVDS pin pair will already have been turned on in the * intel_crtc_mode_set since it has a large impact on the DPLL * settings. */ } /** * Detect the LVDS connection. * * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means * connected and closed means disconnected. We also send hotplug events as * needed, using lid status notification from the input layer. */ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; enum drm_connector_status status; status = intel_panel_detect(dev); if (status != connector_status_unknown) return status; return connector_status_connected; } /** * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ static int intel_lvds_get_modes(struct drm_connector *connector) { struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; struct drm_display_mode *mode; if (intel_lvds->edid) return drm_add_edid_modes(connector, intel_lvds->edid); mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); if (mode == NULL) return 0; drm_mode_probed_add(connector, mode); return 1; } static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) { - DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); + DRM_INFO("Skipping forced modeset for %s\n", id->ident); return 1; } /* The GPU hangs up on these systems if modeset is performed on LID open */ static const struct dmi_system_id intel_no_modeset_on_lid[] = { { .callback = intel_no_modeset_on_lid_dmi_callback, .ident = "Toshiba Tecra A11", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), }, }, { } /* terminating entry */ }; #ifdef NOTYET /* * Lid events. Note the use of 'modeset_on_lid': * - we set it on lid close, and reset it on open * - we use it as a "only once" bit (ie we ignore * duplicate events where it was already properly * set/reset) * - the suspend/resume paths will also set it to * zero, since they restore the mode ("lid open"). 
*/ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, void *unused) { struct drm_i915_private *dev_priv = container_of(nb, struct drm_i915_private, lid_notifier); struct drm_device *dev = dev_priv->dev; struct drm_connector *connector = dev_priv->int_lvds_connector; if (dev->switch_power_state != DRM_SWITCH_POWER_ON) return NOTIFY_OK; /* * check and update the status of LVDS connector after receiving * the LID nofication event. */ if (connector) connector->status = connector->funcs->detect(connector, false); /* Don't force modeset on machines where it causes a GPU lockup */ if (dmi_check_system(intel_no_modeset_on_lid)) return NOTIFY_OK; if (!acpi_lid_open()) { dev_priv->modeset_on_lid = 1; return NOTIFY_OK; } if (!dev_priv->modeset_on_lid) return NOTIFY_OK; dev_priv->modeset_on_lid = 0; mutex_lock(&dev->mode_config.mutex); drm_helper_resume_force_mode(dev); mutex_unlock(&dev->mode_config.mutex); return NOTIFY_OK; } #endif /** * intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free * * Unregister the DDC bus for this connector then free the driver private * structure. */ static void intel_lvds_destroy(struct drm_connector *connector) { struct drm_device *dev = connector->dev; #if 0 struct drm_i915_private *dev_priv = dev->dev_private; #endif intel_panel_destroy_backlight(dev); #if 0 if (dev_priv->lid_notifier.notifier_call) acpi_lid_notifier_unregister(&dev_priv->lid_notifier); #endif #if 0 drm_sysfs_connector_remove(connector); #endif drm_connector_cleanup(connector); free(connector, DRM_MEM_KMS); } static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; if (property == dev->mode_config.scaling_mode_property) { struct drm_crtc *crtc = intel_lvds->base.base.crtc; if (value == DRM_MODE_SCALE_NONE) { DRM_DEBUG_KMS("no scaling not supported\n"); return -EINVAL; } if (intel_lvds->fitting_mode == value) { /* the LVDS scaling property is not changed */ return 0; } intel_lvds->fitting_mode = value; if (crtc && crtc->enabled) { /* * If the CRTC is enabled, the display will be changed * according to the new panel fitting mode. 
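 * The update is applied by re-running a full mode set on the current mode.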
*/ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } } return 0; } static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { .dpms = intel_lvds_dpms, .mode_fixup = intel_lvds_mode_fixup, .prepare = intel_lvds_prepare, .mode_set = intel_lvds_mode_set, .commit = intel_lvds_commit, }; static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_lvds_set_property, .destroy = intel_lvds_destroy, }; static const struct drm_encoder_funcs intel_lvds_enc_funcs = { .destroy = intel_encoder_destroy, }; static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id) { - DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident); + DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); return 1; } /* These systems claim to have LVDS, but really don't */ static const struct dmi_system_id intel_no_lvds[] = { { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core series)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core 2 series)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "MSI IM-945GSE-A", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MSI"), DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Dell Studio Hybrid", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Dell OptiPlex FX170", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "AOpen Mini PC", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "AOpen Mini PC MP915", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "AOpen i915GMm-HFS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "AOpen i45GMx-I", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", .matches = { DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Clientron U800", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), DMI_MATCH(DMI_PRODUCT_NAME, "U800"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Clientron E830", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), DMI_MATCH(DMI_PRODUCT_NAME, "E830"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Asus EeeBox PC EB1007", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Asus AT5NM10T-I", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK 
Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Hewlett-Packard t5745", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Hewlett-Packard st5747", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "MSI Wind Box DC500", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), }, }, { } /* terminating entry */ }; /** * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID * @dev: drm device * @connector: LVDS connector * * Find the reduced downclock for LVDS in EDID. */ static void intel_find_lvds_downclock(struct drm_device *dev, struct drm_display_mode *fixed_mode, struct drm_connector *connector) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *scan; int temp_downclock; temp_downclock = fixed_mode->clock; list_for_each_entry(scan, &connector->probed_modes, head) { /* * If one mode has the same resolution with the fixed_panel * mode while they have the different refresh rate, it means * that the reduced downclock is found for the LVDS. In such * case we can set the different FPx0/1 to dynamically select * between low and high frequency. */ if (scan->hdisplay == fixed_mode->hdisplay && scan->hsync_start == fixed_mode->hsync_start && scan->hsync_end == fixed_mode->hsync_end && scan->htotal == fixed_mode->htotal && scan->vdisplay == fixed_mode->vdisplay && scan->vsync_start == fixed_mode->vsync_start && scan->vsync_end == fixed_mode->vsync_end && scan->vtotal == fixed_mode->vtotal) { if (scan->clock < temp_downclock) { /* * The downclock is already found. But we * expect to find the lower downclock. */ temp_downclock = scan->clock; } } } if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) { /* We found the downclock for LVDS. */ dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = temp_downclock; DRM_DEBUG_KMS("LVDS downclock is found in EDID. " "Normal clock %dKhz, downclock %dKhz\n", fixed_mode->clock, temp_downclock); } } /* * Enumerate the child dev array parsed from VBT to check whether * the LVDS is present. * If it is present, return 1. * If it is not present, return false. * If no child dev is parsed from VBT, it assumes that the LVDS is present. */ static bool lvds_is_present_in_vbt(struct drm_device *dev, u8 *i2c_pin) { struct drm_i915_private *dev_priv = dev->dev_private; int i; if (!dev_priv->child_dev_num) return true; for (i = 0; i < dev_priv->child_dev_num; i++) { struct child_device_config *child = dev_priv->child_dev + i; /* If the device type is not LFP, continue. * We have to check both the new identifiers as well as the * old for compatibility with some BIOSes. */ if (child->device_type != DEVICE_TYPE_INT_LFP && child->device_type != DEVICE_TYPE_LFP) continue; - if (child->i2c_pin) - *i2c_pin = child->i2c_pin; + if (intel_gmbus_is_port_valid(child->i2c_pin)) + *i2c_pin = child->i2c_pin; /* However, we cannot trust the BIOS writers to populate * the VBT correctly. Since LVDS requires additional * information from AIM blocks, a non-zero addin offset is * a good indicator that the LVDS is actually present. 
*/ if (child->addin_offset) return true; /* But even then some BIOS writers perform some black magic * and instantiate the device without reference to any * additional data. Trust that if the VBT was written into * the OpRegion then they have validated the LVDS's existence. */ if (dev_priv->opregion.vbt) return true; } return false; } static bool intel_lvds_supported(struct drm_device *dev) { /* With the introduction of the PCH we gained a dedicated * LVDS presence pin, use it. */ if (HAS_PCH_SPLIT(dev)) return true; /* Otherwise LVDS was only attached to mobile products, * except for the inglorious 830gm */ return IS_MOBILE(dev) && !IS_I830(dev); } /** * intel_lvds_init - setup LVDS connectors on this device * @dev: drm device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). */ bool intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds *intel_lvds; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_crtc *crtc; u32 lvds; int pipe; u8 pin; if (!intel_lvds_supported(dev)) return false; /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) return false; pin = GMBUS_PORT_PANEL; if (!lvds_is_present_in_vbt(dev, &pin)) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); return false; } if (HAS_PCH_SPLIT(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) return false; if (dev_priv->edp.support) { DRM_DEBUG_KMS("disable LVDS for eDP support\n"); return false; } } intel_lvds = malloc(sizeof(struct intel_lvds), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); if (!HAS_PCH_SPLIT(dev)) { intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); } intel_encoder = &intel_lvds->base; encoder = &intel_encoder->base; connector = &intel_connector->base; drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_LVDS; intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); if (HAS_PCH_SPLIT(dev)) intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); else intel_encoder->crtc_mask = (1 << 1); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; /* create the scaling mode property */ drm_mode_create_scaling_mode_property(dev); /* * the initial panel fitting mode will be FULL_SCREEN. */ drm_connector_attach_property(&intel_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_ASPECT); intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; /* * LVDS discovery: * 1) check for EDID on DDC * 2) check for VBT data * 3) check to see if LVDS is already on * if none of the above, no panel * 4) make sure lid is open * if closed, act like it's not there for now */ /* * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. 
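 * (the preferred mode is the entry flagged DRM_MODE_TYPE_PREFERRED in
 * the probed mode list scanned below).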
*/ - intel_lvds->edid = drm_get_edid(connector, dev_priv->gmbus[pin]); + intel_lvds->edid = drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, pin)); if (intel_lvds->edid) { if (drm_add_edid_modes(connector, intel_lvds->edid)) { drm_mode_connector_update_edid_property(connector, intel_lvds->edid); } else { free(intel_lvds->edid, DRM_MEM_KMS); intel_lvds->edid = NULL; } } if (!intel_lvds->edid) { /* Didn't get an EDID, so * Set wide sync ranges so we get all modes * handed to valid_mode for checking */ connector->display_info.min_vfreq = 0; connector->display_info.max_vfreq = 200; connector->display_info.min_hfreq = 0; connector->display_info.max_hfreq = 200; } list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { intel_lvds->fixed_mode = drm_mode_duplicate(dev, scan); intel_find_lvds_downclock(dev, intel_lvds->fixed_mode, connector); goto out; } } /* Failed to get EDID, what about VBT? */ if (dev_priv->lfp_lvds_vbt_mode) { intel_lvds->fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); if (intel_lvds->fixed_mode) { intel_lvds->fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; } } /* * If we didn't get EDID, try checking if the panel is already turned * on. If so, assume that whatever is currently programmed is the * correct mode. */ /* Ironlake: FIXME if still fail, not try pipe mode now */ if (HAS_PCH_SPLIT(dev)) goto failed; lvds = I915_READ(LVDS); pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc && (lvds & LVDS_PORT_EN)) { intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); if (intel_lvds->fixed_mode) { intel_lvds->fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; } } /* If we still don't have a mode after all that, give up. */ if (!intel_lvds->fixed_mode) goto failed; out: if (HAS_PCH_SPLIT(dev)) { u32 pwm; pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 
1 : 0; /* make sure PWM is enabled and locked to the LVDS pipe */ pwm = I915_READ(BLC_PWM_CPU_CTL2); if (pipe == 0 && (pwm & PWM_PIPE_B)) I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE); if (pipe) pwm |= PWM_PIPE_B; else pwm &= ~PWM_PIPE_B; I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE); pwm = I915_READ(BLC_PWM_PCH_CTL1); pwm |= PWM_PCH_ENABLE; I915_WRITE(BLC_PWM_PCH_CTL1, pwm); /* * Unlock registers and just * leave them unlocked */ I915_WRITE(PCH_PP_CONTROL, I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); } else { /* * Unlock registers and just * leave them unlocked */ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); } #ifdef NOTYET dev_priv->lid_notifier.notifier_call = intel_lid_notify; if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { DRM_DEBUG_KMS("lid notifier registration failed\n"); dev_priv->lid_notifier.notifier_call = NULL; } #endif /* keep the LVDS connector */ dev_priv->int_lvds_connector = connector; #if 0 drm_sysfs_connector_add(connector); #endif intel_panel_setup_backlight(dev); return true; failed: DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); free(intel_lvds, DRM_MEM_KMS); free(intel_connector, DRM_MEM_KMS); return false; } Index: head/sys/dev/drm2/i915/intel_modes.c =================================================================== --- head/sys/dev/drm2/i915/intel_modes.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_modes.c (revision 277487) @@ -1,143 +1,143 @@ /* * Copyright (c) 2007 Dave Airlie * Copyright (c) 2007, 2010 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include /** * intel_ddc_probe * */ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) { struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private; u8 out_buf[] = { 0x0, 0x0}; u8 buf[2]; struct iic_msg msgs[] = { { .slave = DDC_ADDR << 1, .flags = IIC_M_WR, .len = 1, .buf = out_buf, }, { .slave = DDC_ADDR << 1, .flags = IIC_M_RD, .len = 1, .buf = buf, } }; - return (iicbus_transfer(dev_priv->gmbus[ddc_bus], msgs, 2) - == 0/* XXXKIB 2*/); + return (iicbus_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus), + msgs, 2) == 0/* XXXKIB 2*/); } /** * intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use * @adapter: i2c adapter * * Fetch the EDID information from @connector using the DDC bus. 
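 *
 * Returns the number of modes added to @connector, or 0 if no EDID
 * could be fetched.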
*/ int intel_ddc_get_modes(struct drm_connector *connector, device_t adapter) { struct edid *edid; int ret = 0; edid = drm_get_edid(connector, adapter); if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } return ret; } static const struct drm_prop_enum_list force_audio_names[] = { { HDMI_AUDIO_OFF_DVI, "force-dvi" }, { HDMI_AUDIO_OFF, "off" }, { HDMI_AUDIO_AUTO, "auto" }, { HDMI_AUDIO_ON, "on" }, }; void intel_attach_force_audio_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_property *prop; prop = dev_priv->force_audio_property; if (prop == NULL) { prop = drm_property_create_enum(dev, 0, "audio", force_audio_names, DRM_ARRAY_SIZE(force_audio_names)); if (prop == NULL) return; dev_priv->force_audio_property = prop; } drm_connector_attach_property(connector, prop, 0); } static const struct drm_prop_enum_list broadcast_rgb_names[] = { { 0, "Full" }, { 1, "Limited 16:235" }, }; void intel_attach_broadcast_rgb_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_property *prop; prop = dev_priv->broadcast_rgb_property; if (prop == NULL) { prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Broadcast RGB", broadcast_rgb_names, DRM_ARRAY_SIZE(broadcast_rgb_names)); if (prop == NULL) return; dev_priv->broadcast_rgb_property = prop; } drm_connector_attach_property(connector, prop, 0); } Index: head/sys/dev/drm2/i915/intel_overlay.c =================================================================== --- head/sys/dev/drm2/i915/intel_overlay.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_overlay.c (revision 277487) @@ -1,1582 +1,1587 @@ /* * Copyright © 2009 * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Daniel Vetter * * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include /* Limits for overlay size. According to intel doc, the real limits are: * Y width: 4095, UV width (planar): 2047, Y height: 2047, * UV width (planar): * 1023. But the xorg thinks 2048 for height and width. Use * the mininum of both. 
*/ #define IMAGE_MAX_WIDTH 2048 #define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */ /* on 830 and 845 these large limits result in the card hanging */ #define IMAGE_MAX_WIDTH_LEGACY 1024 #define IMAGE_MAX_HEIGHT_LEGACY 1088 /* overlay register definitions */ /* OCMD register */ #define OCMD_TILED_SURFACE (0x1<<19) #define OCMD_MIRROR_MASK (0x3<<17) #define OCMD_MIRROR_MODE (0x3<<17) #define OCMD_MIRROR_HORIZONTAL (0x1<<17) #define OCMD_MIRROR_VERTICAL (0x2<<17) #define OCMD_MIRROR_BOTH (0x3<<17) #define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */ #define OCMD_UV_SWAP (0x1<<14) /* YVYU */ #define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */ #define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */ #define OCMD_SOURCE_FORMAT_MASK (0xf<<10) #define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */ #define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */ #define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */ #define OCMD_YUV_422_PACKED (0x8<<10) #define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */ #define OCMD_YUV_420_PLANAR (0xc<<10) #define OCMD_YUV_422_PLANAR (0xd<<10) #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) #define OCMD_BUF_TYPE_MASK (0x1<<5) #define OCMD_BUF_TYPE_FRAME (0x0<<5) #define OCMD_BUF_TYPE_FIELD (0x1<<5) #define OCMD_TEST_MODE (0x1<<4) #define OCMD_BUFFER_SELECT (0x3<<2) #define OCMD_BUFFER0 (0x0<<2) #define OCMD_BUFFER1 (0x1<<2) #define OCMD_FIELD_SELECT (0x1<<2) #define OCMD_FIELD0 (0x0<<1) #define OCMD_FIELD1 (0x1<<1) #define OCMD_ENABLE (0x1<<0) /* OCONFIG register */ #define OCONF_PIPE_MASK (0x1<<18) #define OCONF_PIPE_A (0x0<<18) #define OCONF_PIPE_B (0x1<<18) #define OCONF_GAMMA2_ENABLE (0x1<<16) #define OCONF_CSC_MODE_BT601 (0x0<<5) #define OCONF_CSC_MODE_BT709 (0x1<<5) #define OCONF_CSC_BYPASS (0x1<<4) #define OCONF_CC_OUT_8BIT (0x1<<3) #define OCONF_TEST_MODE (0x1<<2) #define OCONF_THREE_LINE_BUFFER (0x1<<0) #define OCONF_TWO_LINE_BUFFER (0x0<<0) /* DCLRKM (dst-key) register */ #define DST_KEY_ENABLE (0x1<<31) #define CLK_RGB24_MASK 0x0 #define CLK_RGB16_MASK 0x070307 #define CLK_RGB15_MASK 0x070707 #define CLK_RGB8I_MASK 0xffffff #define RGB16_TO_COLORKEY(c) \ (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3)) #define RGB15_TO_COLORKEY(c) \ (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3)) /* overlay flip addr flag */ #define OFC_UPDATE 0x1 /* polyphase filter coefficients */ #define N_HORIZ_Y_TAPS 5 #define N_VERT_Y_TAPS 3 #define N_HORIZ_UV_TAPS 3 #define N_VERT_UV_TAPS 3 #define N_PHASES 17 #define MAX_TAPS 5 /* memory bufferd overlay registers */ struct overlay_registers { u32 OBUF_0Y; u32 OBUF_1Y; u32 OBUF_0U; u32 OBUF_0V; u32 OBUF_1U; u32 OBUF_1V; u32 OSTRIDE; u32 YRGB_VPH; u32 UV_VPH; u32 HORZ_PH; u32 INIT_PHS; u32 DWINPOS; u32 DWINSZ; u32 SWIDTH; u32 SWIDTHSW; u32 SHEIGHT; u32 YRGBSCALE; u32 UVSCALE; u32 OCLRC0; u32 OCLRC1; u32 DCLRKV; u32 DCLRKM; u32 SCLRKVH; u32 SCLRKVL; u32 SCLRKEN; u32 OCONFIG; u32 OCMD; u32 RESERVED1; /* 0x6C */ u32 OSTART_0Y; u32 OSTART_1Y; u32 OSTART_0U; u32 OSTART_0V; u32 OSTART_1U; u32 OSTART_1V; u32 OTILEOFF_0Y; u32 OTILEOFF_1Y; u32 OTILEOFF_0U; u32 OTILEOFF_0V; u32 OTILEOFF_1U; u32 OTILEOFF_1V; u32 FASTHSCALE; /* 0xA0 */ u32 UVSCALEV; /* 0xA4 */ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES]; u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */ u16 RESERVEDE[0x200 / 2 - 
N_HORIZ_Y_TAPS * N_PHASES]; u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES]; u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; }; struct intel_overlay { struct drm_device *dev; struct intel_crtc *crtc; struct drm_i915_gem_object *vid_bo; struct drm_i915_gem_object *old_vid_bo; int active; int pfit_active; u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ u32 color_key; u32 brightness, contrast, saturation; u32 old_xscale, old_yscale; /* register access */ u32 flip_addr; struct drm_i915_gem_object *reg_bo; /* flip handling */ uint32_t last_flip_req; void (*flip_tail)(struct intel_overlay *); }; static struct overlay_registers * intel_overlay_map_regs(struct intel_overlay *overlay) { struct overlay_registers *regs; if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) { regs = overlay->reg_bo->phys_obj->handle->vaddr; } else { regs = pmap_mapdev_attr(overlay->dev->agp->base + overlay->reg_bo->gtt_offset, PAGE_SIZE, PAT_WRITE_COMBINING); } return (regs); } static void intel_overlay_unmap_regs(struct intel_overlay *overlay, struct overlay_registers *regs) { if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE); } static int intel_overlay_do_wait_request(struct intel_overlay *overlay, struct drm_i915_gem_request *request, void (*tail)(struct intel_overlay *)) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; int ret; KASSERT(!overlay->last_flip_req, ("Overlay already has flip req")); - ret = i915_add_request(LP_RING(dev_priv), NULL, request); + ret = i915_add_request(ring, NULL, request); if (ret) { free(request, DRM_I915_GEM); return ret; } overlay->last_flip_req = request->seqno; overlay->flip_tail = tail; - ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, - true); + ret = i915_wait_request(ring, overlay->last_flip_req); if (ret) return ret; + i915_gem_retire_requests(dev); overlay->last_flip_req = 0; return 0; } /* Workaround for i830 bug where pipe a must be enable to change control regs */ static int i830_activate_pipe_a(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *crtc; struct drm_crtc_helper_funcs *crtc_funcs; struct drm_display_mode vesa_640x480 = { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 489, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, *mode; crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]); if (crtc->dpms_mode == DRM_MODE_DPMS_ON) return 0; /* most i8xx have pipe a forced on, so don't trust dpms mode */ if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE) return 0; crtc_funcs = crtc->base.helper_private; if (crtc_funcs->dpms == NULL) return 0; DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); mode = drm_mode_duplicate(dev, &vesa_640x480); - drm_mode_set_crtcinfo(mode, 0); + if (!drm_crtc_helper_set_mode(&crtc->base, mode, crtc->base.x, crtc->base.y, crtc->base.fb)) return 0; crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON); return 1; } static void i830_deactivate_pipe_a(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); } /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay 
*overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; struct drm_i915_gem_request *request; int pipe_a_quirk = 0; int ret; KASSERT(!overlay->active, ("Overlay is active")); overlay->active = 1; if (IS_I830(dev)) { pipe_a_quirk = i830_activate_pipe_a(dev); if (pipe_a_quirk < 0) return pipe_a_quirk; } request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO); - ret = BEGIN_LP_RING(4); + ret = intel_ring_begin(ring, 4); if (ret) { free(request, DRM_I915_GEM); goto out; } - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); - OUT_RING(overlay->flip_addr | OFC_UPDATE); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); + intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); ret = intel_overlay_do_wait_request(overlay, request, NULL); out: if (pipe_a_quirk) i830_deactivate_pipe_a(dev); return ret; } /* overlay needs to be enabled in OCMD reg */ static int intel_overlay_continue(struct intel_overlay *overlay, bool load_polyphase_filter) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; struct drm_i915_gem_request *request; u32 flip_addr = overlay->flip_addr; u32 tmp; int ret; KASSERT(overlay->active, ("Overlay not active")); request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO); if (load_polyphase_filter) flip_addr |= OFC_UPDATE; /* check for underruns */ tmp = I915_READ(DOVSTA); if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - ret = BEGIN_LP_RING(2); + ret = intel_ring_begin(ring, 2); if (ret) { free(request, DRM_I915_GEM); return ret; } - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - OUT_RING(flip_addr); - ADVANCE_LP_RING(); + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(ring, flip_addr); + intel_ring_advance(ring); - ret = i915_add_request(LP_RING(dev_priv), NULL, request); + ret = i915_add_request(ring, NULL, request); if (ret) { free(request, DRM_I915_GEM); return ret; } overlay->last_flip_req = request->seqno; return 0; } static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) { struct drm_i915_gem_object *obj = overlay->old_vid_bo; i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); overlay->old_vid_bo = NULL; } static void intel_overlay_off_tail(struct intel_overlay *overlay) { struct drm_i915_gem_object *obj = overlay->vid_bo; /* never have the overlay hw on without showing a frame */ KASSERT(overlay->vid_bo != NULL, ("No vid_bo")); i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); overlay->vid_bo = NULL; overlay->crtc->overlay = NULL; overlay->crtc = NULL; overlay->active = 0; } /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; u32 flip_addr = overlay->flip_addr; struct drm_i915_gem_request *request; int ret; KASSERT(overlay->active, ("Overlay is not active")); request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO); /* According to intel docs the overlay hw may hang (when switching * off) 
without loading the filter coeffs. It is however unclear whether * this applies to the disabling of the overlay or to the switching off * of the hw. Do it in both cases */ flip_addr |= OFC_UPDATE; - ret = BEGIN_LP_RING(6); + ret = intel_ring_begin(ring, 6); if (ret) { free(request, DRM_I915_GEM); return ret; } /* wait for overlay to go idle */ - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(ring, flip_addr); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - ADVANCE_LP_RING(); + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + intel_ring_emit(ring, flip_addr); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_advance(ring); return intel_overlay_do_wait_request(overlay, request, intel_overlay_off_tail); } /* recover from an interruption due to a signal * We have to be careful not to repeat work forever an make forward progess. */ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; int ret; if (overlay->last_flip_req == 0) return 0; - ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, - true); + ret = i915_wait_request(ring, overlay->last_flip_req); if (ret) return ret; + i915_gem_retire_requests(dev); if (overlay->flip_tail) overlay->flip_tail(overlay); overlay->last_flip_req = 0; return 0; } /* Wait for pending overlay flip and release old frame. * Needs to be called before the overlay register are changed * via intel_overlay_(un)map_regs */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; int ret; /* Only wait if there is actually an old frame to release to * guarantee forward progress. 
*/ if (!overlay->old_vid_bo) return 0; if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { struct drm_i915_gem_request *request; /* synchronous slowpath */ request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO); - ret = BEGIN_LP_RING(2); + ret = intel_ring_begin(ring, 2); if (ret) { free(request, DRM_I915_GEM); return ret; } - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); ret = intel_overlay_do_wait_request(overlay, request, intel_overlay_release_old_vid_tail); if (ret) return ret; } intel_overlay_release_old_vid_tail(overlay); return 0; } struct put_image_params { int format; short dst_x; short dst_y; short dst_w; short dst_h; short src_w; short src_scan_h; short src_scan_w; short src_h; short stride_Y; short stride_UV; int offset_Y; int offset_U; int offset_V; }; static int packed_depth_bytes(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: return 4; case I915_OVERLAY_YUV411: /* return 6; not implemented */ default: return -EINVAL; } } static int packed_width_bytes(u32 format, short width) { switch (format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: return width << 1; default: return -EINVAL; } } static int uv_hsubsampling(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: case I915_OVERLAY_YUV420: return 2; case I915_OVERLAY_YUV411: case I915_OVERLAY_YUV410: return 4; default: return -EINVAL; } } static int uv_vsubsampling(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV420: case I915_OVERLAY_YUV410: return 2; case I915_OVERLAY_YUV422: case I915_OVERLAY_YUV411: return 1; default: return -EINVAL; } } static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) { u32 mask, shift, ret; if (IS_GEN2(dev)) { mask = 0x1f; shift = 5; } else { mask = 0x3f; shift = 6; } ret = ((offset + width + mask) >> shift) - (offset >> shift); if (!IS_GEN2(dev)) ret <<= 1; ret -= 1; return ret << 2; } static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, 0xb000, 0x3000, 0x0800, 0x3000, 0xb000 }; static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880, 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00, 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0, 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, 0x3000, 0x0800, 0x3000 }; static void update_polyphase_filter(struct overlay_registers *regs) { memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs)); memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, 
sizeof(uv_static_hcoeffs)); } static bool update_scaling_factors(struct intel_overlay *overlay, struct overlay_registers *regs, struct put_image_params *params) { /* fixed point with a 12 bit shift */ u32 xscale, yscale, xscale_UV, yscale_UV; #define FP_SHIFT 12 #define FRACT_MASK 0xfff bool scale_changed = false; int uv_hscale = uv_hsubsampling(params->format); int uv_vscale = uv_vsubsampling(params->format); if (params->dst_w > 1) xscale = ((params->src_scan_w - 1) << FP_SHIFT) /(params->dst_w); else xscale = 1 << FP_SHIFT; if (params->dst_h > 1) yscale = ((params->src_scan_h - 1) << FP_SHIFT) /(params->dst_h); else yscale = 1 << FP_SHIFT; /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ xscale_UV = xscale/uv_hscale; yscale_UV = yscale/uv_vscale; /* make the Y scale to UV scale ratio an exact multiply */ xscale = xscale_UV * uv_hscale; yscale = yscale_UV * uv_vscale; /*} else { xscale_UV = 0; yscale_UV = 0; }*/ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) scale_changed = true; overlay->old_xscale = xscale; overlay->old_yscale = yscale; regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) | ((xscale >> FP_SHIFT) << 16) | ((xscale & FRACT_MASK) << 3)); regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) | ((xscale_UV >> FP_SHIFT) << 16) | ((xscale_UV & FRACT_MASK) << 3)); regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) | ((yscale_UV >> FP_SHIFT) << 0))); if (scale_changed) update_polyphase_filter(regs); return scale_changed; } static void update_colorkey(struct intel_overlay *overlay, struct overlay_registers *regs) { u32 key = overlay->color_key; switch (overlay->crtc->base.fb->bits_per_pixel) { case 8: regs->DCLRKV = 0; regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; break; case 16: if (overlay->crtc->base.fb->depth == 15) { regs->DCLRKV = RGB15_TO_COLORKEY(key); regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; } else { regs->DCLRKV = RGB16_TO_COLORKEY(key); regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; } break; case 24: case 32: regs->DCLRKV = key; regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; break; } } static u32 overlay_cmd_reg(struct put_image_params *params) { u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0; if (params->format & I915_OVERLAY_YUV_PLANAR) { switch (params->format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: cmd |= OCMD_YUV_422_PLANAR; break; case I915_OVERLAY_YUV420: cmd |= OCMD_YUV_420_PLANAR; break; case I915_OVERLAY_YUV411: case I915_OVERLAY_YUV410: cmd |= OCMD_YUV_410_PLANAR; break; } } else { /* YUV packed */ switch (params->format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: cmd |= OCMD_YUV_422_PACKED; break; case I915_OVERLAY_YUV411: cmd |= OCMD_YUV_411_PACKED; break; } switch (params->format & I915_OVERLAY_SWAP_MASK) { case I915_OVERLAY_NO_SWAP: break; case I915_OVERLAY_UV_SWAP: cmd |= OCMD_UV_SWAP; break; case I915_OVERLAY_Y_SWAP: cmd |= OCMD_Y_SWAP; break; case I915_OVERLAY_Y_AND_UV_SWAP: cmd |= OCMD_Y_AND_UV_SWAP; break; } } return cmd; } static u32 max_u32(u32 a, u32 b) { return (a > b ? 
a : b); } static int intel_overlay_do_put_image(struct intel_overlay *overlay, struct drm_i915_gem_object *new_bo, struct put_image_params *params) { int ret, tmp_width; struct overlay_registers *regs; bool scale_changed = false; + u32 swidth, swidthsw, sheight, ostride; KASSERT(overlay != NULL, ("No overlay ?")); DRM_LOCK_ASSERT(overlay->dev); DRM_MODE_CONFIG_ASSERT_LOCKED(overlay->dev); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) return ret; ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL); if (ret != 0) goto out_unpin; ret = i915_gem_object_put_fence(new_bo); if (ret) goto out_unpin; if (!overlay->active) { + u32 oconfig; regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unpin; } - regs->OCONFIG = OCONF_CC_OUT_8BIT; + oconfig = OCONF_CC_OUT_8BIT; if (IS_GEN4(overlay->dev)) - regs->OCONFIG |= OCONF_CSC_MODE_BT709; - regs->OCONFIG |= overlay->crtc->pipe == 0 ? + oconfig |= OCONF_CSC_MODE_BT709; + oconfig |= overlay->crtc->pipe == 0 ? OCONF_PIPE_A : OCONF_PIPE_B; + regs->OCONFIG = oconfig; intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_on(overlay); if (ret != 0) goto out_unpin; } regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unpin; } regs->DWINPOS = (params->dst_y << 16) | params->dst_x; regs->DWINSZ = (params->dst_h << 16) | params->dst_w; if (params->format & I915_OVERLAY_YUV_PACKED) tmp_width = packed_width_bytes(params->format, params->src_w); else tmp_width = params->src_w; - regs->SWIDTH = params->src_w; - regs->SWIDTHSW = calc_swidthsw(overlay->dev, - params->offset_Y, tmp_width); - regs->SHEIGHT = params->src_h; + swidth = params->src_w; + swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); + sheight = params->src_h; regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y; - regs->OSTRIDE = params->stride_Y; + ostride = params->stride_Y; if (params->format & I915_OVERLAY_YUV_PLANAR) { int uv_hscale = uv_hsubsampling(params->format); int uv_vscale = uv_vsubsampling(params->format); u32 tmp_U, tmp_V; - regs->SWIDTH |= (params->src_w/uv_hscale) << 16; + swidth |= (params->src_w/uv_hscale) << 16; tmp_U = calc_swidthsw(overlay->dev, params->offset_U, params->src_w/uv_hscale); tmp_V = calc_swidthsw(overlay->dev, params->offset_V, params->src_w/uv_hscale); - regs->SWIDTHSW |= max_u32(tmp_U, tmp_V) << 16; - regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; + swidthsw |= max_u32(tmp_U, tmp_V) << 16; + sheight |= (params->src_h/uv_vscale) << 16; regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; - regs->OSTRIDE |= params->stride_UV << 16; + ostride |= params->stride_UV << 16; } + regs->SWIDTH = swidth; + regs->SWIDTHSW = swidthsw; + regs->SHEIGHT = sheight; + regs->OSTRIDE = ostride; + scale_changed = update_scaling_factors(overlay, regs, params); update_colorkey(overlay, regs); regs->OCMD = overlay_cmd_reg(params); intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_continue(overlay, scale_changed); if (ret) goto out_unpin; overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; return 0; out_unpin: i915_gem_object_unpin(new_bo); return ret; } int intel_overlay_switch_off(struct intel_overlay *overlay) { struct overlay_registers *regs; int ret; DRM_LOCK_ASSERT(overlay->dev); DRM_MODE_CONFIG_ASSERT_LOCKED(overlay->dev); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) return ret; if (!overlay->active) return 0; ret = intel_overlay_release_old_vid(overlay); if (ret != 0) return ret; regs 
= intel_overlay_map_regs(overlay); regs->OCMD = 0; intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_off(overlay); if (ret != 0) return ret; intel_overlay_off_tail(overlay); return 0; } static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, struct intel_crtc *crtc) { drm_i915_private_t *dev_priv = overlay->dev->dev_private; if (!crtc->active) return -EINVAL; /* can't use the overlay with double wide pipe */ if (INTEL_INFO(overlay->dev)->gen < 4 && (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE) return -EINVAL; return 0; } static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 pfit_control = I915_READ(PFIT_CONTROL); u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in * line with the intel documentation for the i965 */ if (INTEL_INFO(dev)->gen >= 4) { /* on i965 use the PGM reg to read out the autoscaler values */ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; } else { if (pfit_control & VERT_AUTO_SCALE) ratio = I915_READ(PFIT_AUTO_RATIOS); else ratio = I915_READ(PFIT_PGM_RATIOS); ratio >>= PFIT_VERT_SCALE_SHIFT; } overlay->pfit_vscale_ratio = ratio; } static int check_overlay_dst(struct intel_overlay *overlay, struct drm_intel_overlay_put_image *rec) { struct drm_display_mode *mode = &overlay->crtc->base.mode; if (rec->dst_x < mode->hdisplay && rec->dst_x + rec->dst_width <= mode->hdisplay && rec->dst_y < mode->vdisplay && rec->dst_y + rec->dst_height <= mode->vdisplay) return 0; else return -EINVAL; } static int check_overlay_scaling(struct put_image_params *rec) { u32 tmp; /* downscaling limit is 8.0 */ tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16; if (tmp > 7) return -EINVAL; tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16; if (tmp > 7) return -EINVAL; return 0; } static int check_overlay_src(struct drm_device *dev, struct drm_intel_overlay_put_image *rec, struct drm_i915_gem_object *new_bo) { int uv_hscale = uv_hsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags); u32 stride_mask; int depth; u32 tmp; /* check src dimensions */ if (IS_845G(dev) || IS_I830(dev)) { if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; } else { if (rec->src_height > IMAGE_MAX_HEIGHT || rec->src_width > IMAGE_MAX_WIDTH) return -EINVAL; } /* better safe than sorry, use 4 as the maximal subsampling ratio */ if (rec->src_height < N_VERT_Y_TAPS*4 || rec->src_width < N_HORIZ_Y_TAPS*4) return -EINVAL; /* check alignment constraints */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { case I915_OVERLAY_RGB: /* not implemented */ return -EINVAL; case I915_OVERLAY_YUV_PACKED: if (uv_vscale != 1) return -EINVAL; depth = packed_depth_bytes(rec->flags); if (depth < 0) return depth; /* ignore UV planes */ rec->stride_UV = 0; rec->offset_U = 0; rec->offset_V = 0; /* check pixel alignment */ if (rec->offset_Y % depth) return -EINVAL; break; case I915_OVERLAY_YUV_PLANAR: if (uv_vscale < 0 || uv_hscale < 0) return -EINVAL; /* no offset restrictions for planar formats */ break; default: return -EINVAL; } if (rec->src_width % uv_hscale) return -EINVAL; /* stride checking */ if (IS_I830(dev) || IS_845G(dev)) stride_mask = 255; else stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; if (IS_GEN4(dev) && rec->stride_Y < 512) return -EINVAL; tmp = (rec->flags & 
I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 4096 : 8192; if (rec->stride_Y > tmp || rec->stride_UV > 2*1024) return -EINVAL; /* check buffer dimensions */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { case I915_OVERLAY_RGB: case I915_OVERLAY_YUV_PACKED: /* always 4 Y values per depth pixels */ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y) return -EINVAL; tmp = rec->stride_Y*rec->src_height; if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; break; case I915_OVERLAY_YUV_PLANAR: if (rec->src_width > rec->stride_Y) return -EINVAL; if (rec->src_width/uv_hscale > rec->stride_UV) return -EINVAL; tmp = rec->stride_Y * rec->src_height; if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; tmp = rec->stride_UV * (rec->src_height / uv_vscale); if (rec->offset_U + tmp > new_bo->base.size || rec->offset_V + tmp > new_bo->base.size) return -EINVAL; break; } return 0; } /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ static int intel_panel_fitter_pipe(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 pfit_control; /* i830 doesn't have a panel fitter */ if (IS_I830(dev)) return -1; pfit_control = I915_READ(PFIT_CONTROL); /* See if the panel fitter is in use */ if ((pfit_control & PFIT_ENABLE) == 0) return -1; /* 965 can place panel fitter on either pipe */ if (IS_GEN4(dev)) return (pfit_control >> 29) & 0x3; /* older chips can only use pipe 1 */ return 1; } int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_overlay_put_image *put_image_rec = data; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct drm_mode_object *drmmode_obj; struct intel_crtc *crtc; struct drm_i915_gem_object *new_bo; struct put_image_params *params; int ret; - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - + /* No need to check for DRIVER_MODESET - we don't set it up then. */ overlay = dev_priv->overlay; if (!overlay) { DRM_DEBUG("userspace bug: no overlay\n"); return -ENODEV; } if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) { sx_xlock(&dev->mode_config.mutex); DRM_LOCK(dev); ret = intel_overlay_switch_off(overlay); DRM_UNLOCK(dev); sx_xunlock(&dev->mode_config.mutex); return ret; } params = malloc(sizeof(struct put_image_params), DRM_I915_GEM, M_WAITOK | M_ZERO); drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, DRM_MODE_OBJECT_CRTC); if (!drmmode_obj) { ret = -ENOENT; goto out_free; } crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, put_image_rec->bo_handle)); if (&new_bo->base == NULL) { ret = -ENOENT; goto out_free; } sx_xlock(&dev->mode_config.mutex); DRM_LOCK(dev); if (new_bo->tiling_mode) { DRM_ERROR("buffer used for overlay image can not be tiled\n"); ret = -EINVAL; goto out_unlock; } ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) goto out_unlock; if (overlay->crtc != crtc) { struct drm_display_mode *mode = &crtc->base.mode; ret = intel_overlay_switch_off(overlay); if (ret != 0) goto out_unlock; ret = check_overlay_possible_on_crtc(overlay, crtc); if (ret != 0) goto out_unlock; overlay->crtc = crtc; crtc->overlay = overlay; /* line too wide, i.e. 
one-line-mode */ if (mode->hdisplay > 1024 && intel_panel_fitter_pipe(dev) == crtc->pipe) { overlay->pfit_active = 1; update_pfit_vscale_ratio(overlay); } else overlay->pfit_active = 0; } ret = check_overlay_dst(overlay, put_image_rec); if (ret != 0) goto out_unlock; if (overlay->pfit_active) { params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / overlay->pfit_vscale_ratio); /* shifting right rounds downwards, so add 1 */ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / overlay->pfit_vscale_ratio) + 1; } else { params->dst_y = put_image_rec->dst_y; params->dst_h = put_image_rec->dst_height; } params->dst_x = put_image_rec->dst_x; params->dst_w = put_image_rec->dst_width; params->src_w = put_image_rec->src_width; params->src_h = put_image_rec->src_height; params->src_scan_w = put_image_rec->src_scan_width; params->src_scan_h = put_image_rec->src_scan_height; if (params->src_scan_h > params->src_h || params->src_scan_w > params->src_w) { ret = -EINVAL; goto out_unlock; } ret = check_overlay_src(dev, put_image_rec, new_bo); if (ret != 0) goto out_unlock; params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; params->stride_Y = put_image_rec->stride_Y; params->stride_UV = put_image_rec->stride_UV; params->offset_Y = put_image_rec->offset_Y; params->offset_U = put_image_rec->offset_U; params->offset_V = put_image_rec->offset_V; /* Check scaling after src size to prevent a divide-by-zero. */ ret = check_overlay_scaling(params); if (ret != 0) goto out_unlock; ret = intel_overlay_do_put_image(overlay, new_bo, params); if (ret != 0) goto out_unlock; DRM_UNLOCK(dev); sx_xunlock(&dev->mode_config.mutex); free(params, DRM_I915_GEM); return 0; out_unlock: DRM_UNLOCK(dev); sx_xunlock(&dev->mode_config.mutex); drm_gem_object_unreference_unlocked(&new_bo->base); out_free: free(params, DRM_I915_GEM); return ret; } static void update_reg_attrs(struct intel_overlay *overlay, struct overlay_registers *regs) { regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff); regs->OCLRC1 = overlay->saturation; } static bool check_gamma_bounds(u32 gamma1, u32 gamma2) { int i; if (gamma1 & 0xff000000 || gamma2 & 0xff000000) return false; for (i = 0; i < 3; i++) { if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) return false; } return true; } static bool check_gamma5_errata(u32 gamma5) { int i; for (i = 0; i < 3; i++) { if (((gamma5 >> i*8) & 0xff) == 0x80) return false; } return true; } static int check_gamma(struct drm_intel_overlay_attrs *attrs) { if (!check_gamma_bounds(0, attrs->gamma0) || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) return -EINVAL; if (!check_gamma5_errata(attrs->gamma5)) return -EINVAL; return 0; } int intel_overlay_attrs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_overlay_attrs *attrs = data; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct overlay_registers *regs; int ret; - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - + /* No need to check for DRIVER_MODESET - we don't set it up then. 
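 * dev_priv->overlay is only initialized when modesetting is enabled.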
*/ overlay = dev_priv->overlay; if (!overlay) { DRM_DEBUG("userspace bug: no overlay\n"); return -ENODEV; } sx_xlock(&dev->mode_config.mutex); DRM_LOCK(dev); ret = -EINVAL; if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { attrs->color_key = overlay->color_key; attrs->brightness = overlay->brightness; attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; if (!IS_GEN2(dev)) { attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma2 = I915_READ(OGAMC2); attrs->gamma3 = I915_READ(OGAMC3); attrs->gamma4 = I915_READ(OGAMC4); attrs->gamma5 = I915_READ(OGAMC5); } } else { if (attrs->brightness < -128 || attrs->brightness > 127) goto out_unlock; if (attrs->contrast > 255) goto out_unlock; if (attrs->saturation > 1023) goto out_unlock; overlay->color_key = attrs->color_key; overlay->brightness = attrs->brightness; overlay->contrast = attrs->contrast; overlay->saturation = attrs->saturation; regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unlock; } update_reg_attrs(overlay, regs); intel_overlay_unmap_regs(overlay, regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { if (IS_GEN2(dev)) goto out_unlock; if (overlay->active) { ret = -EBUSY; goto out_unlock; } ret = check_gamma(attrs); if (ret) goto out_unlock; I915_WRITE(OGAMC0, attrs->gamma0); I915_WRITE(OGAMC1, attrs->gamma1); I915_WRITE(OGAMC2, attrs->gamma2); I915_WRITE(OGAMC3, attrs->gamma3); I915_WRITE(OGAMC4, attrs->gamma4); I915_WRITE(OGAMC5, attrs->gamma5); } } ret = 0; out_unlock: DRM_UNLOCK(dev); sx_xunlock(&dev->mode_config.mutex); return ret; } void intel_setup_overlay(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct drm_i915_gem_object *reg_bo; struct overlay_registers *regs; int ret; if (!HAS_OVERLAY(dev)) return; overlay = malloc(sizeof(struct intel_overlay), DRM_I915_GEM, M_WAITOK | M_ZERO); DRM_LOCK(dev); if (dev_priv->overlay != NULL) goto out_free; overlay->dev = dev; reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); if (!reg_bo) goto out_free; overlay->reg_bo = reg_bo; if (OVERLAY_NEEDS_PHYSICAL(dev)) { ret = i915_gem_attach_phys_object(dev, reg_bo, I915_GEM_PHYS_OVERLAY_REGS, PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; } overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; } else { ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; } overlay->flip_addr = reg_bo->gtt_offset; ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); if (ret) { DRM_ERROR("failed to move overlay register bo into the GTT\n"); goto out_unpin_bo; } } /* init all values */ overlay->color_key = 0x0101fe; overlay->brightness = -19; overlay->contrast = 75; overlay->saturation = 146; regs = intel_overlay_map_regs(overlay); if (!regs) goto out_unpin_bo; memset(regs, 0, sizeof(struct overlay_registers)); update_polyphase_filter(regs); update_reg_attrs(overlay, regs); intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; DRM_INFO("initialized overlay support\n"); DRM_UNLOCK(dev); return; out_unpin_bo: if (!OVERLAY_NEEDS_PHYSICAL(dev)) i915_gem_object_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(&reg_bo->base); out_free: DRM_UNLOCK(dev); free(overlay, DRM_I915_GEM); return; } void intel_cleanup_overlay(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv->overlay) return; /* The bo's should be free'd by the generic code already.
* Furthermore modesetting teardown happens beforehand so the * hardware should be off already */ KASSERT(!dev_priv->overlay->active, ("Overlay still active")); drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base); free(dev_priv->overlay, DRM_I915_GEM); } struct intel_overlay_error_state { struct overlay_registers regs; unsigned long base; u32 dovsta; u32 isr; }; struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_overlay *overlay = dev_priv->overlay; struct intel_overlay_error_state *error; struct overlay_registers __iomem *regs; if (!overlay || !overlay->active) return NULL; error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT); if (error == NULL) return NULL; error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; else error->base = (long) overlay->reg_bo->gtt_offset; regs = intel_overlay_map_regs(overlay); if (!regs) goto err; memcpy(&error->regs, regs, sizeof(struct overlay_registers)); intel_overlay_unmap_regs(overlay, regs); return (error); err: free(error, DRM_I915_GEM); return (NULL); } void intel_overlay_print_error_state(struct sbuf *m, struct intel_overlay_error_state *error) { sbuf_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", error->dovsta, error->isr); sbuf_printf(m, " Register file at 0x%08lx:\n", error->base); #define P(x) sbuf_printf(m, " " #x ": 0x%08x\n", error->regs.x) P(OBUF_0Y); P(OBUF_1Y); P(OBUF_0U); P(OBUF_0V); P(OBUF_1U); P(OBUF_1V); P(OSTRIDE); P(YRGB_VPH); P(UV_VPH); P(HORZ_PH); P(INIT_PHS); P(DWINPOS); P(DWINSZ); P(SWIDTH); P(SWIDTHSW); P(SHEIGHT); P(YRGBSCALE); P(UVSCALE); P(OCLRC0); P(OCLRC1); P(DCLRKV); P(DCLRKM); P(SCLRKVH); P(SCLRKVL); P(SCLRKEN); P(OCONFIG); P(OCMD); P(OSTART_0Y); P(OSTART_1Y); P(OSTART_0U); P(OSTART_0V); P(OSTART_1U); P(OSTART_1V); P(OTILEOFF_0Y); P(OTILEOFF_1Y); P(OTILEOFF_0U); P(OTILEOFF_0V); P(OTILEOFF_1U); P(OTILEOFF_1V); P(FASTHSCALE); P(UVSCALEV); #undef P } Index: head/sys/dev/drm2/i915/intel_panel.c =================================================================== --- head/sys/dev/drm2/i915/intel_panel.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_panel.c (revision 277487) @@ -1,327 +1,343 @@ /* * Copyright © 2006-2010 Intel Corporation * Copyright (c) 2006 Dave Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt * Dave Airlie * Jesse Barnes * Chris Wilson */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) { adjusted_mode->hdisplay = fixed_mode->hdisplay; adjusted_mode->hsync_start = fixed_mode->hsync_start; adjusted_mode->hsync_end = fixed_mode->hsync_end; adjusted_mode->htotal = fixed_mode->htotal; adjusted_mode->vdisplay = fixed_mode->vdisplay; adjusted_mode->vsync_start = fixed_mode->vsync_start; adjusted_mode->vsync_end = fixed_mode->vsync_end; adjusted_mode->vtotal = fixed_mode->vtotal; adjusted_mode->clock = fixed_mode->clock; } /* adjusted_mode has been preset to be the panel's fixed mode */ void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_i915_private *dev_priv = dev->dev_private; int x, y, width, height; x = y = width = height = 0; /* Native modes don't need fitting */ if (adjusted_mode->hdisplay == mode->hdisplay && adjusted_mode->vdisplay == mode->vdisplay) goto done; switch (fitting_mode) { case DRM_MODE_SCALE_CENTER: width = mode->hdisplay; height = mode->vdisplay; x = (adjusted_mode->hdisplay - width + 1)/2; y = (adjusted_mode->vdisplay - height + 1)/2; break; case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ { u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; if (scaled_width > scaled_height) { /* pillar */ width = scaled_height / mode->vdisplay; if (width & 1) width++; x = (adjusted_mode->hdisplay - width + 1) / 2; y = 0; height = adjusted_mode->vdisplay; } else if (scaled_width < scaled_height) { /* letter */ height = scaled_width / mode->hdisplay; if (height & 1) height++; y = (adjusted_mode->vdisplay - height + 1) / 2; x = 0; width = adjusted_mode->hdisplay; } else { x = y = 0; width = adjusted_mode->hdisplay; height = adjusted_mode->vdisplay; } } break; default: case DRM_MODE_SCALE_FULLSCREEN: x = y = 0; width = adjusted_mode->hdisplay; height = adjusted_mode->vdisplay; break; } done: dev_priv->pch_pf_pos = (x << 16) | y; dev_priv->pch_pf_size = (width << 16) | height; } static int is_backlight_combination_mode(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 4) return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; if (IS_GEN2(dev)) return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; return 0; } static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) { u32 val; /* Restore the CTL value if it lost, e.g. 
GPU reset */ if (HAS_PCH_SPLIT(dev_priv->dev)) { val = I915_READ(BLC_PWM_PCH_CTL2); if (dev_priv->saveBLC_PWM_CTL2 == 0) { dev_priv->saveBLC_PWM_CTL2 = val; } else if (val == 0) { I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); val = dev_priv->saveBLC_PWM_CTL2; } } else { val = I915_READ(BLC_PWM_CTL); if (dev_priv->saveBLC_PWM_CTL == 0) { dev_priv->saveBLC_PWM_CTL = val; dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); } else if (val == 0) { I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); val = dev_priv->saveBLC_PWM_CTL; } } return val; } u32 intel_panel_get_max_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 max; max = i915_read_blc_pwm_ctl(dev_priv); if (max == 0) { /* XXX add code here to query mode clock or hardware clock * and program max PWM appropriately. */ #if 0 printf("fixme: max PWM is zero.\n"); #endif return 1; } if (HAS_PCH_SPLIT(dev)) { max >>= 16; } else { if (INTEL_INFO(dev)->gen < 4) max >>= 17; else max >>= 16; if (is_backlight_combination_mode(dev)) max *= 0xff; } DRM_DEBUG("max backlight PWM = %d\n", max); return max; } +static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (i915_panel_invert_brightness < 0) + return val; + + if (i915_panel_invert_brightness > 0 || + dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) + return intel_panel_get_max_backlight(dev) - val; + + return val; +} + u32 intel_panel_get_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val; if (HAS_PCH_SPLIT(dev)) { val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; } else { val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; if (INTEL_INFO(dev)->gen < 4) val >>= 1; if (is_backlight_combination_mode(dev)) { u8 lbpc; lbpc = pci_read_config(dev->device, PCI_LBPC, 1); val *= lbpc; } } - DRM_DEBUG("get backlight PWM = %d\n", val); + val = intel_panel_compute_brightness(dev, val); + DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); return val; } static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; I915_WRITE(BLC_PWM_CPU_CTL, val | level); } static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level) { struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp; - DRM_DEBUG("set backlight PWM = %d\n", level); + DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); + level = intel_panel_compute_brightness(dev, level); if (HAS_PCH_SPLIT(dev)) return intel_pch_panel_set_backlight(dev, level); if (is_backlight_combination_mode(dev)) { u32 max = intel_panel_get_max_backlight(dev); u8 lbpc; lbpc = level * 0xfe / max + 1; level /= lbpc; pci_write_config(dev->device, PCI_LBPC, lbpc, 4); } tmp = I915_READ(BLC_PWM_CTL); if (INTEL_INFO(dev)->gen < 4) level <<= 1; tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; I915_WRITE(BLC_PWM_CTL, tmp | level); } void intel_panel_set_backlight(struct drm_device *dev, u32 level) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->backlight_level = level; if (dev_priv->backlight_enabled) intel_panel_actually_set_backlight(dev, level); } void intel_panel_disable_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->backlight_enabled = false; intel_panel_actually_set_backlight(dev, 0); } void 
intel_panel_enable_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); dev_priv->backlight_enabled = true; intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); } static void intel_panel_init_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->backlight_level = intel_panel_get_backlight(dev); dev_priv->backlight_enabled = dev_priv->backlight_level != 0; } enum drm_connector_status intel_panel_detect(struct drm_device *dev) { #if 0 struct drm_i915_private *dev_priv = dev->dev_private; #endif if (i915_panel_ignore_lid) return i915_panel_ignore_lid > 0 ? connector_status_connected : connector_status_disconnected; /* opregion lid state on HP 2540p is wrong at boot up, * appears to be either the BIOS or Linux ACPI fault */ #if 0 /* Assume that the BIOS does not lie through the OpRegion... */ if (dev_priv->opregion.lid_state) return ioread32(dev_priv->opregion.lid_state) & 0x1 ? connector_status_connected : connector_status_disconnected; #endif return connector_status_unknown; } int intel_panel_setup_backlight(struct drm_device *dev) { intel_panel_init_backlight(dev); return 0; } void intel_panel_destroy_backlight(struct drm_device *dev) { return; } Index: head/sys/dev/drm2/i915/intel_pm.c =================================================================== --- head/sys/dev/drm2/i915/intel_pm.c (nonexistent) +++ head/sys/dev/drm2/i915/intel_pm.c (revision 277487) @@ -0,0 +1,3788 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eugeni Dodonov + * + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +static struct drm_i915_private *i915_mch_dev; +/* + * Lock protecting IPS related data structures + * - i915_mch_dev + * - dev_priv->max_delay + * - dev_priv->min_delay + * - dev_priv->fmax + * - dev_priv->gpu_busy + */ +static struct mtx mchdev_lock; +MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF); + +/* FBC, or Frame Buffer Compression, is a technique employed to compress the + * framebuffer contents in-memory, aiming at reducing the required bandwidth + * during in-memory transfers and, therefore, reduce the power packet. + * + * The benefits of FBC are mostly visible with solid backgrounds and + * variation-less patterns. 
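+ *
+ * A negative i915_enable_fbc value selects the per-chip default (FBC is
+ * left off for gen6 and earlier), zero disables it and a positive value
+ * requests it; see intel_update_fbc() below.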
+ * + * FBC-related functionality can be enabled by the means of the + * i915.i915_enable_fbc parameter + */ + +static void i8xx_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 fbc_ctl; + + /* Disable compression */ + fbc_ctl = I915_READ(FBC_CONTROL); + if ((fbc_ctl & FBC_CTL_EN) == 0) + return; + + fbc_ctl &= ~FBC_CTL_EN; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + /* Wait for compressing bit to clear */ + if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { + DRM_DEBUG_KMS("FBC idle timed out\n"); + return; + } + + DRM_DEBUG_KMS("disabled FBC\n"); +} + +static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int cfb_pitch; + int plane, i; + u32 fbc_ctl, fbc_ctl2; + + cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; + if (fb->pitches[0] < cfb_pitch) + cfb_pitch = fb->pitches[0]; + + /* FBC_CTL wants 64B units */ + cfb_pitch = (cfb_pitch / 64) - 1; + plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; + + /* Clear old tags */ + for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) + I915_WRITE(FBC_TAG + (i * 4), 0); + + /* Set it up... */ + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; + fbc_ctl2 |= plane; + I915_WRITE(FBC_CONTROL2, fbc_ctl2); + I915_WRITE(FBC_FENCE_OFF, crtc->y); + + /* enable it... */ + fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; + fbc_ctl |= obj->fence_reg; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", + cfb_pitch, crtc->y, intel_crtc->plane); +} + +static bool i8xx_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(FBC_CONTROL) & FBC_CTL_EN; +} + +static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; + dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; + I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); + + I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(DPFC_FENCE_YOFF, crtc->y); + + /* enable it... 
*/ + I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); + + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); +} + +static void g4x_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(DPFC_CONTROL, dpfc_ctl); + + DRM_DEBUG_KMS("disabled FBC\n"); + } +} + +static bool g4x_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; +} + +static void sandybridge_blit_fbc_update(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 blt_ecoskpd; + + /* Make sure blitter notifies FBC of writes */ + gen6_gt_force_wake_get(dev_priv); + blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); + blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << + GEN6_BLITTER_LOCK_SHIFT; + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << + GEN6_BLITTER_LOCK_SHIFT); + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + POSTING_READ(GEN6_BLITTER_ECOSKPD); + gen6_gt_force_wake_put(dev_priv); +} + +static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + dpfc_ctl &= DPFC_RESERVED; + dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); + /* Set persistent mode for front-buffer rendering, ala X. */ + dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; + dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); + I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); + + I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); + I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); + /* enable it... 
*/ + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + if (IS_GEN6(dev)) { + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | obj->fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); + sandybridge_blit_fbc_update(dev); + } + + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); +} + +static void ironlake_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + + DRM_DEBUG_KMS("disabled FBC\n"); + } +} + +static bool ironlake_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + +bool intel_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv->display.fbc_enabled) + return false; + + return dev_priv->display.fbc_enabled(dev); +} + +static void intel_fbc_work_fn(void *arg, int pending) +{ + struct intel_fbc_work *work = arg; + struct drm_device *dev = work->crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + DRM_LOCK(dev); + if (work == dev_priv->fbc_work) { + /* Double check that we haven't switched fb without cancelling + * the prior work. + */ + if (work->crtc->fb == work->fb) { + dev_priv->display.enable_fbc(work->crtc, + work->interval); + + dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; + dev_priv->cfb_fb = work->crtc->fb->base.id; + dev_priv->cfb_y = work->crtc->y; + } + + dev_priv->fbc_work = NULL; + } + DRM_UNLOCK(dev); + + free(work, DRM_MEM_KMS); +} + +static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) +{ + u_int pending; + + if (dev_priv->fbc_work == NULL) + return; + + DRM_DEBUG_KMS("cancelling pending FBC enable\n"); + + /* Synchronisation is provided by struct_mutex and checking of + * dev_priv->fbc_work, so we can perform the cancellation + * entirely asynchronously. + */ + if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task, + &pending) == 0) + /* tasklet was killed before being run, clean up */ + free(dev_priv->fbc_work, DRM_MEM_KMS); + + /* Mark the work as no longer wanted so that if it does + * wake-up (because the work was already running and waiting + * for our mutex), it will discover that is no longer + * necessary to run. + */ + dev_priv->fbc_work = NULL; +} + +void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct intel_fbc_work *work; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv->display.enable_fbc) + return; + + intel_cancel_fbc_work(dev_priv); + + work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO); + + work->crtc = crtc; + work->fb = crtc->fb; + work->interval = interval; + TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn, + work); + + dev_priv->fbc_work = work; + + DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); + + /* Delay the actual enabling to let pageflipping cease and the + * display to settle before starting the compression. Note that + * this delay also serves a second purpose: it allows for a + * vblank to pass after disabling the FBC before we attempt + * to modify the control registers. 
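+ * (In practice the deferred enable below is scheduled roughly 50 ms out
+ * via taskqueue_enqueue_timeout().)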
+ * + * A more complicated solution would involve tracking vblanks + * following the termination of the page-flipping sequence + * and indeed performing the enable as a co-routine and not + * waiting synchronously upon the vblank. + */ + taskqueue_enqueue_timeout(dev_priv->tq, &work->task, + msecs_to_jiffies(50)); +} + +void intel_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + intel_cancel_fbc_work(dev_priv); + + if (!dev_priv->display.disable_fbc) + return; + + dev_priv->display.disable_fbc(dev); + dev_priv->cfb_plane = -1; +} + +/** + * intel_update_fbc - enable/disable FBC as needed + * @dev: the drm_device + * + * Set up the framebuffer compression hardware at mode set time. We + * enable it if possible: + * - plane A only (on pre-965) + * - no pixel mulitply/line duplication + * - no alpha buffer discard + * - no dual wide + * - framebuffer <= 2048 in width, 1536 in height + * + * We can't assume that any compression will take place (worst case), + * so the compressed buffer has to be the same size as the uncompressed + * one. It also must reside (along with the line length buffer) in + * stolen memory. + * + * We need to enable/disable FBC on a global basis. + */ +void intel_update_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = NULL, *tmp_crtc; + struct intel_crtc *intel_crtc; + struct drm_framebuffer *fb; + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj; + int enable_fbc; + + DRM_DEBUG_KMS("\n"); + + if (!i915_powersave) + return; + + if (!I915_HAS_FBC(dev)) + return; + + /* + * If FBC is already on, we just have to verify that we can + * keep it that way... + * Need to disable if: + * - more than one pipe is active + * - changing FBC params (stride, fence, mode) + * - new fb is too large to fit in compressed buffer + * - going to an unsupported config (interlace, pixel multiply, etc.) 
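+ *
+ * Most of the checks below record why compression is kept off in
+ * dev_priv->no_fbc_reason before jumping to out_disable.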
+ */ + list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { + if (tmp_crtc->enabled && tmp_crtc->fb) { + if (crtc) { + DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + goto out_disable; + } + crtc = tmp_crtc; + } + } + + if (!crtc || crtc->fb == NULL) { + DRM_DEBUG_KMS("no output, disabling\n"); + dev_priv->no_fbc_reason = FBC_NO_OUTPUT; + goto out_disable; + } + + intel_crtc = to_intel_crtc(crtc); + fb = crtc->fb; + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + + enable_fbc = i915_enable_fbc; + if (enable_fbc < 0) { + DRM_DEBUG_KMS("fbc set to per-chip default\n"); + enable_fbc = 1; + if (INTEL_INFO(dev)->gen <= 6) + enable_fbc = 0; + } + if (!enable_fbc) { + DRM_DEBUG_KMS("fbc disabled per module param\n"); + dev_priv->no_fbc_reason = FBC_MODULE_PARAM; + goto out_disable; + } + if (intel_fb->obj->base.size > dev_priv->cfb_size) { + DRM_DEBUG_KMS("framebuffer too large, disabling " + "compression\n"); + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + goto out_disable; + } + if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || + (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { + DRM_DEBUG_KMS("mode incompatible with compression, " + "disabling\n"); + dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; + goto out_disable; + } + if ((crtc->mode.hdisplay > 2048) || + (crtc->mode.vdisplay > 1536)) { + DRM_DEBUG_KMS("mode too large for compression, disabling\n"); + dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; + goto out_disable; + } + if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { + DRM_DEBUG_KMS("plane not 0, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_BAD_PLANE; + goto out_disable; + } + + /* The use of a CPU fence is mandatory in order to detect writes + * by the CPU to the scanout and trigger updates to the FBC. + */ + if (obj->tiling_mode != I915_TILING_X || + obj->fence_reg == I915_FENCE_REG_NONE) { + DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_NOT_TILED; + goto out_disable; + } + + /* If the kernel debugger is active, always disable compression */ + if (kdb_active) + goto out_disable; + + /* If the scanout has not changed, don't modify the FBC settings. + * Note that we make the fundamental assumption that the fb->obj + * cannot be unpinned (and have its GTT offset and fence revoked) + * without first being decoupled from the scanout and FBC disabled. + */ + if (dev_priv->cfb_plane == intel_crtc->plane && + dev_priv->cfb_fb == fb->base.id && + dev_priv->cfb_y == crtc->y) + return; + + if (intel_fbc_enabled(dev)) { + /* We update FBC along two paths, after changing fb/crtc + * configuration (modeswitching) and after page-flipping + * finishes. For the latter, we know that not only did + * we disable the FBC at the start of the page-flip + * sequence, but also more than one vblank has passed. + * + * For the former case of modeswitching, it is possible + * to switch between two FBC valid configurations + * instantaneously so we do need to disable the FBC + * before we can modify its control registers. We also + * have to wait for the next vblank for that to take + * effect. However, since we delay enabling FBC we can + * assume that a vblank has passed since disabling and + * that we can safely alter the registers in the deferred + * callback. 
+ * + * In the scenario that we go from a valid to invalid + * and then back to valid FBC configuration we have + * no strict enforcement that a vblank occurred since + * disabling the FBC. However, along all current pipe + * disabling paths we do need to wait for a vblank at + * some point. And we wait before enabling FBC anyway. + */ + DRM_DEBUG_KMS("disabling active FBC for update\n"); + intel_disable_fbc(dev); + } + + intel_enable_fbc(crtc, 500); + return; + +out_disable: + /* Multiple disables should be harmless */ + if (intel_fbc_enabled(dev)) { + DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); + intel_disable_fbc(dev); + } +} + +static void i915_pineview_get_mem_freq(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 tmp; + + tmp = I915_READ(CLKCFG); + + switch (tmp & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_533: + dev_priv->fsb_freq = 533; /* 133*4 */ + break; + case CLKCFG_FSB_800: + dev_priv->fsb_freq = 800; /* 200*4 */ + break; + case CLKCFG_FSB_667: + dev_priv->fsb_freq = 667; /* 167*4 */ + break; + case CLKCFG_FSB_400: + dev_priv->fsb_freq = 400; /* 100*4 */ + break; + } + + switch (tmp & CLKCFG_MEM_MASK) { + case CLKCFG_MEM_533: + dev_priv->mem_freq = 533; + break; + case CLKCFG_MEM_667: + dev_priv->mem_freq = 667; + break; + case CLKCFG_MEM_800: + dev_priv->mem_freq = 800; + break; + } + + /* detect pineview DDR3 setting */ + tmp = I915_READ(CSHRDDR3CTL); + dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; +} + +static void i915_ironlake_get_mem_freq(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u16 ddrpll, csipll; + + ddrpll = I915_READ16(DDRMPLL1); + csipll = I915_READ16(CSIPLL0); + + switch (ddrpll & 0xff) { + case 0xc: + dev_priv->mem_freq = 800; + break; + case 0x10: + dev_priv->mem_freq = 1066; + break; + case 0x14: + dev_priv->mem_freq = 1333; + break; + case 0x18: + dev_priv->mem_freq = 1600; + break; + default: + DRM_DEBUG("unknown memory frequency 0x%02x\n", + ddrpll & 0xff); + dev_priv->mem_freq = 0; + break; + } + + dev_priv->r_t = dev_priv->mem_freq; + + switch (csipll & 0x3ff) { + case 0x00c: + dev_priv->fsb_freq = 3200; + break; + case 0x00e: + dev_priv->fsb_freq = 3733; + break; + case 0x010: + dev_priv->fsb_freq = 4266; + break; + case 0x012: + dev_priv->fsb_freq = 4800; + break; + case 0x014: + dev_priv->fsb_freq = 5333; + break; + case 0x016: + dev_priv->fsb_freq = 5866; + break; + case 0x018: + dev_priv->fsb_freq = 6400; + break; + default: + DRM_DEBUG("unknown fsb frequency 0x%04x\n", + csipll & 0x3ff); + dev_priv->fsb_freq = 0; + break; + } + + if (dev_priv->fsb_freq == 3200) { + dev_priv->c_m = 0; + } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { + dev_priv->c_m = 1; + } else { + dev_priv->c_m = 2; + } +} + +static const struct cxsr_latency cxsr_latency_table[] = { + {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ + {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ + {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ + {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ + {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ + + {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ + {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ + {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ + {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ + {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ + + {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ + {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ + {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ + {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ + + {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ + {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ + {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ + {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ + {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ + + {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ + {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ + {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ + {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ + {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ + + {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ + {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ + {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ + {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ + {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ +}; + +static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, + int is_ddr3, + int fsb, + int mem) +{ + const struct cxsr_latency *latency; + int i; + + if (fsb == 0 || mem == 0) + return NULL; + + for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) { + latency = &cxsr_latency_table[i]; + if (is_desktop == latency->is_desktop && + is_ddr3 == latency->is_ddr3 && + fsb == latency->fsb_freq && mem == latency->mem_freq) + return latency; + } + + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + + return NULL; +} + +static void pineview_disable_cxsr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* deactivate cxsr */ + I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); +} + +/* + * Latency for FIFO fetches is dependent on several factors: + * - memory configuration (speed, channels) + * - chipset + * - current MCH state + * It can be fairly high in some situations, so here we assume a fairly + * pessimal value. It's a tradeoff between extra memory fetches (if we + * set this value too high, the FIFO will fetch frequently to stay full) + * and power consumption (set it too low to save power and we might see + * FIFO underruns and display "flicker"). + * + * A value of 5us seems to be a good balance; safe for very low end + * platforms but not overly aggressive on lower latency configs. + */ +static const int latency_ns = 5000; + +static int i9xx_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + if (plane) + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", size); + + return size; +} + +static int i85x_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x1ff; + if (plane) + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; + size >>= 1; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? 
"B" : "A", size); + + return size; +} + +static int i845_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 2; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", + size); + + return size; +} + +static int i830_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 1; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", size); + + return size; +} + +/* Pineview has different values for various configs */ +static const struct intel_watermark_params pineview_display_wm = { + PINEVIEW_DISPLAY_FIFO, + PINEVIEW_MAX_WM, + PINEVIEW_DFT_WM, + PINEVIEW_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params pineview_display_hplloff_wm = { + PINEVIEW_DISPLAY_FIFO, + PINEVIEW_MAX_WM, + PINEVIEW_DFT_HPLLOFF_WM, + PINEVIEW_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params pineview_cursor_wm = { + PINEVIEW_CURSOR_FIFO, + PINEVIEW_CURSOR_MAX_WM, + PINEVIEW_CURSOR_DFT_WM, + PINEVIEW_CURSOR_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params pineview_cursor_hplloff_wm = { + PINEVIEW_CURSOR_FIFO, + PINEVIEW_CURSOR_MAX_WM, + PINEVIEW_CURSOR_DFT_WM, + PINEVIEW_CURSOR_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params g4x_wm_info = { + G4X_FIFO_SIZE, + G4X_MAX_WM, + G4X_MAX_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params g4x_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params valleyview_wm_info = { + VALLEYVIEW_FIFO_SIZE, + VALLEYVIEW_MAX_WM, + VALLEYVIEW_MAX_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params valleyview_cursor_wm_info = { + I965_CURSOR_FIFO, + VALLEYVIEW_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params i965_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + I915_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params i945_wm_info = { + I945_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i915_wm_info = { + I915_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i855_wm_info = { + I855GM_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i830_wm_info = { + I830_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; + +static const struct intel_watermark_params ironlake_display_wm_info = { + ILK_DISPLAY_FIFO, + ILK_DISPLAY_MAXWM, + ILK_DISPLAY_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params ironlake_cursor_wm_info = { + ILK_CURSOR_FIFO, + ILK_CURSOR_MAXWM, + ILK_CURSOR_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params ironlake_display_srwm_info = { + ILK_DISPLAY_SR_FIFO, + ILK_DISPLAY_MAX_SRWM, + ILK_DISPLAY_DFT_SRWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params ironlake_cursor_srwm_info = { + ILK_CURSOR_SR_FIFO, + ILK_CURSOR_MAX_SRWM, + 
ILK_CURSOR_DFT_SRWM, + 2, + ILK_FIFO_LINE_SIZE +}; + +static const struct intel_watermark_params sandybridge_display_wm_info = { + SNB_DISPLAY_FIFO, + SNB_DISPLAY_MAXWM, + SNB_DISPLAY_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_cursor_wm_info = { + SNB_CURSOR_FIFO, + SNB_CURSOR_MAXWM, + SNB_CURSOR_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_display_srwm_info = { + SNB_DISPLAY_SR_FIFO, + SNB_DISPLAY_MAX_SRWM, + SNB_DISPLAY_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_cursor_srwm_info = { + SNB_CURSOR_SR_FIFO, + SNB_CURSOR_MAX_SRWM, + SNB_CURSOR_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; + + +/** + * intel_calculate_wm - calculate watermark level + * @clock_in_khz: pixel clock + * @wm: chip FIFO params + * @pixel_size: display pixel size + * @latency_ns: memory latency for the platform + * + * Calculate the watermark level (the level at which the display plane will + * start fetching from memory again). Each chip has a different display + * FIFO size and allocation, so the caller needs to figure that out and pass + * in the correct intel_watermark_params structure. + * + * As the pixel clock runs, the FIFO will be drained at a rate that depends + * on the pixel size. When it reaches the watermark level, it'll start + * fetching FIFO line sized based chunks from memory until the FIFO fills + * past the watermark point. If the FIFO drains completely, a FIFO underrun + * will occur, and a display engine hang could result. + */ +static unsigned long intel_calculate_wm(unsigned long clock_in_khz, + const struct intel_watermark_params *wm, + int fifo_size, + int pixel_size, + unsigned long latency_ns) +{ + long entries_required, wm_size; + + /* + * Note: we need to make sure we don't overflow for various clock & + * latency values. + * clocks go from a few thousand to several hundred thousand. + * latency is usually a few thousand + */ + entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / + 1000; + entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); + + DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); + + wm_size = fifo_size - (entries_required + wm->guard_size); + + DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); + + /* Don't promote wm_size to unsigned... 
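+ * (the checks below rely on it staying signed).
+ *
+ * Rough worked example of the computation above, assuming a 100000 kHz
+ * pixel clock, 4 bytes per pixel, 5000 ns latency and a 64-byte cacheline:
+ *   entries_required = ((100000 / 1000) * 4 * 5000) / 1000 = 2000 bytes
+ *   DIV_ROUND_UP(2000, 64) = 32 FIFO entries
+ *   wm_size = fifo_size - (32 + wm->guard_size)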
*/ + if (wm_size > (long)wm->max_wm) + wm_size = wm->max_wm; + if (wm_size <= 0) + wm_size = wm->default_wm; + return wm_size; +} + +static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) +{ + struct drm_crtc *crtc, *enabled = NULL; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (crtc->enabled && crtc->fb) { + if (enabled) + return NULL; + enabled = crtc; + } + } + + return enabled; +} + +static void pineview_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + const struct cxsr_latency *latency; + u32 reg; + unsigned long wm; + + latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, + dev_priv->fsb_freq, dev_priv->mem_freq); + if (!latency) { + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + pineview_disable_cxsr(dev); + return; + } + + crtc = single_enabled_crtc(dev); + if (crtc) { + int clock = crtc->mode.clock; + int pixel_size = crtc->fb->bits_per_pixel / 8; + + /* Display SR */ + wm = intel_calculate_wm(clock, &pineview_display_wm, + pineview_display_wm.fifo_size, + pixel_size, latency->display_sr); + reg = I915_READ(DSPFW1); + reg &= ~DSPFW_SR_MASK; + reg |= wm << DSPFW_SR_SHIFT; + I915_WRITE(DSPFW1, reg); + DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); + + /* cursor SR */ + wm = intel_calculate_wm(clock, &pineview_cursor_wm, + pineview_display_wm.fifo_size, + pixel_size, latency->cursor_sr); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_CURSOR_SR_MASK; + reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; + I915_WRITE(DSPFW3, reg); + + /* Display HPLL off SR */ + wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, + pineview_display_hplloff_wm.fifo_size, + pixel_size, latency->display_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_HPLL_SR_MASK; + reg |= wm & DSPFW_HPLL_SR_MASK; + I915_WRITE(DSPFW3, reg); + + /* cursor HPLL off SR */ + wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, + pineview_display_hplloff_wm.fifo_size, + pixel_size, latency->cursor_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_HPLL_CURSOR_MASK; + reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; + I915_WRITE(DSPFW3, reg); + DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); + + /* activate cxsr */ + I915_WRITE(DSPFW3, + I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); + DRM_DEBUG_KMS("Self-refresh is enabled\n"); + } else { + pineview_disable_cxsr(dev); + DRM_DEBUG_KMS("Self-refresh is disabled\n"); + } +} + +static bool g4x_compute_wm0(struct drm_device *dev, + int plane, + const struct intel_watermark_params *display, + int display_latency_ns, + const struct intel_watermark_params *cursor, + int cursor_latency_ns, + int *plane_wm, + int *cursor_wm) +{ + struct drm_crtc *crtc; + int htotal, hdisplay, clock, pixel_size; + int line_time_us, line_count; + int entries, tlb_miss; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (crtc->fb == NULL || !crtc->enabled) { + *cursor_wm = cursor->guard_size; + *plane_wm = display->guard_size; + return false; + } + + htotal = crtc->mode.htotal; + hdisplay = crtc->mode.hdisplay; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + /* Use the small buffer method to calculate plane watermark */ + entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *plane_wm = entries + display->guard_size; + if 
(*plane_wm > (int)display->max_wm) + *plane_wm = display->max_wm; + + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = ((htotal * 1000) / clock); + line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; + entries = line_count * 64 * pixel_size; + tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + if (*cursor_wm > (int)cursor->max_wm) + *cursor_wm = (int)cursor->max_wm; + + return true; +} + +/* + * Check the wm result. + * + * If any calculated watermark values is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. + */ +static bool g4x_check_srwm(struct drm_device *dev, + int display_wm, int cursor_wm, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor) +{ + DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", + display_wm, cursor_wm); + + if (display_wm > display->max_wm) { + DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", + display_wm, display->max_wm); + return false; + } + + if (cursor_wm > cursor->max_wm) { + DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", + cursor_wm, cursor->max_wm); + return false; + } + + if (!(display_wm || cursor_wm)) { + DRM_DEBUG_KMS("SR latency is 0, disabling\n"); + return false; + } + + return true; +} + +static bool g4x_compute_srwm(struct drm_device *dev, + int plane, + int latency_ns, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor, + int *display_wm, int *cursor_wm) +{ + struct drm_crtc *crtc; + int hdisplay, htotal, pixel_size, clock; + unsigned long line_time_us; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *display_wm = *cursor_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + hdisplay = crtc->mode.hdisplay; + htotal = crtc->mode.htotal; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *display_wm = entries + display->guard_size; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + + return g4x_check_srwm(dev, + *display_wm, *cursor_wm, + display, cursor); +} + +static bool vlv_compute_drain_latency(struct drm_device *dev, + int plane, + int *plane_prec_mult, + int *plane_dl, + int *cursor_prec_mult, + int *cursor_dl) +{ + struct drm_crtc *crtc; + int clock, pixel_size; + int entries; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (crtc->fb == NULL || !crtc->enabled) + return false; + + clock = crtc->mode.clock; /* VESA DOT Clock */ + pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ + + entries = (clock / 1000) * pixel_size; + *plane_prec_mult = (entries > 256) ? 
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; + *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) * + pixel_size); + + entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ + *cursor_prec_mult = (entries > 256) ? + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; + *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4); + + return true; +} + +/* + * Update drain latency registers of memory arbiter + * + * Valleyview SoC has a new memory arbiter and needs drain latency registers + * to be programmed. Each plane has a drain latency multiplier and a drain + * latency value. + */ + +static void vlv_update_drain_latency(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_prec, planea_dl, planeb_prec, planeb_dl; + int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl; + int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is + either 16 or 32 */ + + /* For plane A, Cursor A */ + if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, + &cursor_prec_mult, &cursora_dl)) { + cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16; + planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16; + + I915_WRITE(VLV_DDL1, cursora_prec | + (cursora_dl << DDL_CURSORA_SHIFT) | + planea_prec | planea_dl); + } + + /* For plane B, Cursor B */ + if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, + &cursor_prec_mult, &cursorb_dl)) { + cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16; + planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16; + + I915_WRITE(VLV_DDL2, cursorb_prec | + (cursorb_dl << DDL_CURSORB_SHIFT) | + planeb_prec | planeb_dl); + } +} + +#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask)) + +static void valleyview_update_wm(struct drm_device *dev) +{ + static const int sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int plane_sr, cursor_sr; + unsigned int enabled = 0; + + vlv_update_drain_latency(dev); + + if (g4x_compute_wm0(dev, 0, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1; + + if (g4x_compute_wm0(dev, 1, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 2; + + plane_sr = cursor_sr = 0; + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &valleyview_wm_info, + &valleyview_cursor_wm_info, + &plane_sr, &cursor_sr)) + I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); + else + I915_WRITE(FW_BLC_SELF_VLV, + I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); + + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | + (cursorb_wm << DSPFW_CURSORB_SHIFT) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + planea_wm); + I915_WRITE(DSPFW2, + (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | + (cursora_wm << DSPFW_CURSORA_SHIFT)); + I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); +} + +static void g4x_update_wm(struct drm_device *dev) +{ + 
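/* Same structure as valleyview_update_wm() above: per-pipe WM0 values via
+ * g4x_compute_wm0(), plus a single self-refresh watermark when only one
+ * plane is enabled. */
+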
static const int sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int plane_sr, cursor_sr; + unsigned int enabled = 0; + + if (g4x_compute_wm0(dev, 0, + &g4x_wm_info, latency_ns, + &g4x_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1; + + if (g4x_compute_wm0(dev, 1, + &g4x_wm_info, latency_ns, + &g4x_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 2; + + plane_sr = cursor_sr = 0; + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &g4x_wm_info, + &g4x_cursor_wm_info, + &plane_sr, &cursor_sr)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + else + I915_WRITE(FW_BLC_SELF, + I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); + + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | + (cursorb_wm << DSPFW_CURSORB_SHIFT) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + planea_wm); + I915_WRITE(DSPFW2, + (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | + (cursora_wm << DSPFW_CURSORA_SHIFT)); + /* HPLL off in SR has some issues on G4x... disable it */ + I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | + (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); +} + +static void i965_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + int srwm = 1; + int cursor_sr = 16; + + /* Calc sr entries for one plane configs */ + crtc = single_enabled_crtc(dev); + if (crtc) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 12000; + int clock = crtc->mode.clock; + int htotal = crtc->mode.htotal; + int hdisplay = crtc->mode.hdisplay; + int pixel_size = crtc->fb->bits_per_pixel / 8; + unsigned long line_time_us; + int entries; + + line_time_us = ((htotal * 1000) / clock); + + /* Use ns/us then divide to preserve precision */ + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * hdisplay; + entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); + srwm = I965_FIFO_SIZE - entries; + if (srwm < 0) + srwm = 1; + srwm &= 0x1ff; + DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", + entries, srwm); + + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * 64; + entries = DIV_ROUND_UP(entries, + i965_cursor_wm_info.cacheline_size); + cursor_sr = i965_cursor_wm_info.fifo_size - + (entries + i965_cursor_wm_info.guard_size); + + if (cursor_sr > i965_cursor_wm_info.max_wm) + cursor_sr = i965_cursor_wm_info.max_wm; + + DRM_DEBUG_KMS("self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); + + if (IS_CRESTLINE(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + } else { + /* Turn off self refresh if both pipes are enabled */ + if (IS_CRESTLINE(dev)) + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", + srwm); + + /* 965 has limitations... 
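+ * the plane and cursor FIFO watermarks below are simply programmed to a
+ * fixed 8; only the self-refresh values computed above (srwm, cursor_sr)
+ * vary.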
*/ + I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | + (8 << 16) | (8 << 8) | (8 << 0)); + I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); + /* update cursor SR watermark */ + I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); +} + +static void i9xx_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + const struct intel_watermark_params *wm_info; + uint32_t fwater_lo; + uint32_t fwater_hi; + int cwm, srwm = 1; + int fifo_size; + int planea_wm, planeb_wm; + struct drm_crtc *crtc, *enabled = NULL; + + if (IS_I945GM(dev)) + wm_info = &i945_wm_info; + else if (!IS_GEN2(dev)) + wm_info = &i915_wm_info; + else + wm_info = &i855_wm_info; + + fifo_size = dev_priv->display.get_fifo_size(dev, 0); + crtc = intel_get_crtc_for_plane(dev, 0); + if (crtc->enabled && crtc->fb) { + planea_wm = intel_calculate_wm(crtc->mode.clock, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, + latency_ns); + enabled = crtc; + } else + planea_wm = fifo_size - wm_info->guard_size; + + fifo_size = dev_priv->display.get_fifo_size(dev, 1); + crtc = intel_get_crtc_for_plane(dev, 1); + if (crtc->enabled && crtc->fb) { + planeb_wm = intel_calculate_wm(crtc->mode.clock, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, + latency_ns); + if (enabled == NULL) + enabled = crtc; + else + enabled = NULL; + } else + planeb_wm = fifo_size - wm_info->guard_size; + + DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + + /* + * Overlay gets an aggressive default since video jitter is bad. + */ + cwm = 2; + + /* Play safe and disable self-refresh before adjusting watermarks. */ + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); + else if (IS_I915GM(dev)) + I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); + + /* Calc sr entries for one plane configs */ + if (HAS_FW_BLC(dev) && enabled) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 6000; + int clock = enabled->mode.clock; + int htotal = enabled->mode.htotal; + int hdisplay = enabled->mode.hdisplay; + int pixel_size = enabled->fb->bits_per_pixel / 8; + unsigned long line_time_us; + int entries; + + line_time_us = (htotal * 1000) / clock; + + /* Use ns/us then divide to preserve precision */ + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * hdisplay; + entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); + DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); + srwm = wm_info->fifo_size - entries; + if (srwm < 0) + srwm = 1; + + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, + FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); + else if (IS_I915GM(dev)) + I915_WRITE(FW_BLC_SELF, srwm & 0x3f); + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); + + fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); + fwater_hi = (cwm & 0x1f); + + /* Set request length to 8 cachelines per fetch */ + fwater_lo = fwater_lo | (1 << 24) | (1 << 8); + fwater_hi = fwater_hi | (1 << 8); + + I915_WRITE(FW_BLC, fwater_lo); + I915_WRITE(FW_BLC2, fwater_hi); + + if (HAS_FW_BLC(dev)) { + if (enabled) { + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, + FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); + else if (IS_I915GM(dev)) + I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); + DRM_DEBUG_KMS("memory self refresh enabled\n"); + } else + DRM_DEBUG_KMS("memory self refresh disabled\n"); + } +} + +static void i830_update_wm(struct drm_device *dev) +{ + 
struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + uint32_t fwater_lo; + int planea_wm; + + crtc = single_enabled_crtc(dev); + if (crtc == NULL) + return; + + planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, + dev_priv->display.get_fifo_size(dev, 0), + crtc->fb->bits_per_pixel / 8, + latency_ns); + fwater_lo = I915_READ(FW_BLC) & ~0xfff; + fwater_lo |= (3<<8) | planea_wm; + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); + + I915_WRITE(FW_BLC, fwater_lo); +} + +#define ILK_LP0_PLANE_LATENCY 700 +#define ILK_LP0_CURSOR_LATENCY 1300 + +/* + * Check the wm result. + * + * If any calculated watermark values is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. + */ +static bool ironlake_check_srwm(struct drm_device *dev, int level, + int fbc_wm, int display_wm, int cursor_wm, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," + " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); + + if (fbc_wm > SNB_FBC_MAX_SRWM) { + DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", + fbc_wm, SNB_FBC_MAX_SRWM, level); + + /* fbc has it's own way to disable FBC WM */ + I915_WRITE(DISP_ARB_CTL, + I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); + return false; + } + + if (display_wm > display->max_wm) { + DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", + display_wm, SNB_DISPLAY_MAX_SRWM, level); + return false; + } + + if (cursor_wm > cursor->max_wm) { + DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", + cursor_wm, SNB_CURSOR_MAX_SRWM, level); + return false; + } + + if (!(fbc_wm || display_wm || cursor_wm)) { + DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); + return false; + } + + return true; +} + +/* + * Compute watermark values of WM[1-3], + */ +static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, + int latency_ns, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor, + int *fbc_wm, int *display_wm, int *cursor_wm) +{ + struct drm_crtc *crtc; + unsigned long line_time_us; + int hdisplay, htotal, pixel_size, clock; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *fbc_wm = *display_wm = *cursor_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + hdisplay = crtc->mode.hdisplay; + htotal = crtc->mode.htotal; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *display_wm = entries + display->guard_size; + + /* + * Spec says: + * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 + */ + *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + + return 
ironlake_check_srwm(dev, level, + *fbc_wm, *display_wm, *cursor_wm, + display, cursor); +} + +static void ironlake_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int fbc_wm, plane_wm, cursor_wm; + unsigned int enabled; + + enabled = 0; + if (g4x_compute_wm0(dev, 0, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEA_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1; + } + + if (g4x_compute_wm0(dev, 1, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEB_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 2; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (!single_plane_enabled(enabled)) + return; + enabled = ffs(enabled) - 1; + + /* WM1 */ + if (!ironlake_compute_srwm(dev, 1, enabled, + ILK_READ_WM1_LATENCY() * 500, + &ironlake_display_srwm_info, + &ironlake_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!ironlake_compute_srwm(dev, 2, enabled, + ILK_READ_WM2_LATENCY() * 500, + &ironlake_display_srwm_info, + &ironlake_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* + * WM3 is unsupported on ILK, probably because we don't have latency + * data for that power state + */ +} + +static void sandybridge_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + u32 val; + int fbc_wm, plane_wm, cursor_wm; + unsigned int enabled; + + enabled = 0; + if (g4x_compute_wm0(dev, 0, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEA_ILK); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEA_ILK, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1; + } + + if (g4x_compute_wm0(dev, 1, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEB_ILK); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEB_ILK, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 2; + } + + if ((dev_priv->num_pipe == 3) && + g4x_compute_wm0(dev, 2, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEC_IVB); + val &= ~(WM0_PIPE_PLANE_MASK | 
WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEC_IVB, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe C -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 3; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + * + * SNB support 3 levels of watermark. + * + * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, + * and disabled in the descending order + * + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (!single_plane_enabled(enabled) || + dev_priv->sprite_scaling_enabled) + return; + enabled = ffs(enabled) - 1; + + /* WM1 */ + if (!ironlake_compute_srwm(dev, 1, enabled, + SNB_READ_WM1_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!ironlake_compute_srwm(dev, 2, enabled, + SNB_READ_WM2_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM3 */ + if (!ironlake_compute_srwm(dev, 3, enabled, + SNB_READ_WM3_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM3_LP_ILK, + WM3_LP_EN | + (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); +} + +static void +haswell_update_linetime_wm(struct drm_device *dev, int pipe, + struct drm_display_mode *mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 temp; + + temp = I915_READ(PIPE_WM_LINETIME(pipe)); + temp &= ~PIPE_WM_LINETIME_MASK; + + /* The WM are computed with base on how long it takes to fill a single + * row at the given clock rate, multiplied by 8. + * */ + temp |= PIPE_WM_LINETIME_TIME( + ((mode->crtc_hdisplay * 1000) / mode->clock) * 8); + + /* IPS watermarks are only used by pipe A, and are ignored by + * pipes B and C. They are calculated similarly to the common + * linetime values, except that we are using CD clock frequency + * in MHz instead of pixel rate for the division. + * + * This is a placeholder for the IPS watermark calculation code. 
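+	 *
+	 * For reference (illustrative numbers, not from the code): the line
+	 * time computed above works out to ((1920 * 1000) / 148500) * 8 =
+	 * 12 * 8 = 96 with integer arithmetic, for a hypothetical 1920-wide
+	 * mode with mode->clock = 148500.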
+ */ + + I915_WRITE(PIPE_WM_LINETIME(pipe), temp); +} + +static bool +sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, + uint32_t sprite_width, int pixel_size, + const struct intel_watermark_params *display, + int display_latency_ns, int *sprite_wm) +{ + struct drm_crtc *crtc; + int clock; + int entries, tlb_miss; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (crtc->fb == NULL || !crtc->enabled) { + *sprite_wm = display->guard_size; + return false; + } + + clock = crtc->mode.clock; + + /* Use the small buffer method to calculate the sprite watermark */ + entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + tlb_miss = display->fifo_size*display->cacheline_size - + sprite_width * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *sprite_wm = entries + display->guard_size; + if (*sprite_wm > (int)display->max_wm) + *sprite_wm = display->max_wm; + + return true; +} + +static bool +sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, + uint32_t sprite_width, int pixel_size, + const struct intel_watermark_params *display, + int latency_ns, int *sprite_wm) +{ + struct drm_crtc *crtc; + unsigned long line_time_us; + int clock; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *sprite_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + clock = crtc->mode.clock; + if (!clock) { + *sprite_wm = 0; + return false; + } + + line_time_us = (sprite_width * 1000) / clock; + if (!line_time_us) { + *sprite_wm = 0; + return false; + } + + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = sprite_width * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *sprite_wm = entries + display->guard_size; + + return *sprite_wm > 0x3ff ? 
false : true; +} + +static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, + uint32_t sprite_width, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + u32 val; + int sprite_wm, reg; + int ret; + + switch (pipe) { + case 0: + reg = WM0_PIPEA_ILK; + break; + case 1: + reg = WM0_PIPEB_ILK; + break; + case 2: + reg = WM0_PIPEC_IVB; + break; + default: + return; /* bad pipe */ + } + + ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, + &sandybridge_display_wm_info, + latency, &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", + pipe); + return; + } + + val = I915_READ(reg); + val &= ~WM0_PIPE_SPRITE_MASK; + I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); + DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); + + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM1_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM1S_LP_ILK, sprite_wm); + + /* Only IVB has two more LP watermarks for sprite */ + if (!IS_IVYBRIDGE(dev)) + return; + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM2_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM2S_LP_IVB, sprite_wm); + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM3_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM3S_LP_IVB, sprite_wm); +} + +/** + * intel_update_watermarks - update FIFO watermark values based on current modes + * + * Calculate watermark values for the various WM regs based on current mode + * and plane configuration. + * + * There are several cases to deal with here: + * - normal (i.e. non-self-refresh) + * - self-refresh (SR) mode + * - lines are large relative to FIFO size (buffer can hold up to 2) + * - lines are small relative to FIFO size (buffer can hold more than 2 + * lines), so need to account for TLB latency + * + * The normal calculation is: + * watermark = dotclock * bytes per pixel * latency + * where latency is platform & configuration dependent (we assume pessimal + * values here). + * + * The SR calculation is: + * watermark = (trunc(latency/line time)+1) * surface width * + * bytes per pixel + * where + * line time = htotal / dotclock + * surface width = hdisplay for normal plane and 64 for cursor + * and latency is assumed to be high, as above. + * + * The final value programmed to the register should always be rounded up, + * and include an extra 2 entries to account for clock crossings. + * + * We don't use the sprite, so we can ignore that. And on Crestline we have + * to set the non-SR watermarks to 8. 
+ */ +void intel_update_watermarks(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.update_wm) + dev_priv->display.update_wm(dev); +} + +void intel_update_linetime_watermarks(struct drm_device *dev, + int pipe, struct drm_display_mode *mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.update_linetime_wm) + dev_priv->display.update_linetime_wm(dev, pipe, mode); +} + +void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, + uint32_t sprite_width, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.update_sprite_wm) + dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, + pixel_size); +} + +static struct drm_i915_gem_object * +intel_alloc_context_page(struct drm_device *dev) +{ + struct drm_i915_gem_object *ctx; + int ret; + + DRM_LOCK_ASSERT(dev); + + ctx = i915_gem_alloc_object(dev, 4096); + if (!ctx) { + DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); + return NULL; + } + + ret = i915_gem_object_pin(ctx, 4096, true); + if (ret) { + DRM_ERROR("failed to pin power context: %d\n", ret); + goto err_unref; + } + + ret = i915_gem_object_set_to_gtt_domain(ctx, 1); + if (ret) { + DRM_ERROR("failed to set-domain on power context: %d\n", ret); + goto err_unpin; + } + + return ctx; + +err_unpin: + i915_gem_object_unpin(ctx); +err_unref: + drm_gem_object_unreference(&ctx->base); + DRM_UNLOCK(dev); + return NULL; +} + +bool ironlake_set_drps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 rgvswctl; + + rgvswctl = I915_READ16(MEMSWCTL); + if (rgvswctl & MEMCTL_CMD_STS) { + DRM_DEBUG("gpu busy, RCS change rejected\n"); + return false; /* still busy with another command */ + } + + rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; + I915_WRITE16(MEMSWCTL, rgvswctl); + POSTING_READ16(MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE16(MEMSWCTL, rgvswctl); + + return true; +} + +void ironlake_enable_drps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 rgvmodectl = I915_READ(MEMMODECTL); + u8 fmax, fmin, fstart, vstart; + + /* Enable temp reporting */ + I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); + I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); + + /* 100ms RC evaluation intervals */ + I915_WRITE(RCUPEI, 100000); + I915_WRITE(RCDNEI, 100000); + + /* Set max/min thresholds to 90ms and 80ms respectively */ + I915_WRITE(RCBMAXAVG, 90000); + I915_WRITE(RCBMINAVG, 80000); + + I915_WRITE(MEMIHYST, 1); + + /* Set up min, max, and cur for interrupt handling */ + fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; + fmin = (rgvmodectl & MEMMODE_FMIN_MASK); + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + + vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + + dev_priv->fmax = fmax; /* IPS callback will increase this */ + dev_priv->fstart = fstart; + + dev_priv->max_delay = fstart; + dev_priv->min_delay = fmin; + dev_priv->cur_delay = fstart; + + DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", + fmax, fmin, fstart); + + I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + + /* + * Interrupts will be enabled in ironlake_irq_postinstall + */ + + I915_WRITE(VIDSTART, vstart); + POSTING_READ(VIDSTART); + + rgvmodectl |= MEMMODE_SWMODE_EN; + I915_WRITE(MEMMODECTL, rgvmodectl); + + if 
(wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) + DRM_ERROR("stuck trying to change perf mode\n"); + pause("915dsp", 1); + + ironlake_set_drps(dev, fstart); + + dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + + I915_READ(0x112e0); + dev_priv->last_time1 = jiffies_to_msecs(jiffies); + dev_priv->last_count2 = I915_READ(0x112f4); + nanotime(&dev_priv->last_time2); +} + +void ironlake_disable_drps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 rgvswctl = I915_READ16(MEMSWCTL); + + /* Ack interrupts, disable EFC interrupt */ + I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); + I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); + I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); + I915_WRITE(DEIIR, DE_PCU_EVENT); + I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); + + /* Go back to the starting frequency */ + ironlake_set_drps(dev, dev_priv->fstart); + pause("915dsp", 1); + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE(MEMSWCTL, rgvswctl); + pause("915dsp", 1); + +} + +void gen6_set_rps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 swreq; + + swreq = (val & 0x3ff) << 25; + I915_WRITE(GEN6_RPNSWREQ, swreq); +} + +void gen6_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RPNSWREQ, 1 << 31); + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMIER, 0); + /* Complete PM interrupt masking here doesn't race with the rps work + * item again unmasking PM interrupts because that is using a different + * register (PMIMR) to mask PM interrupts. The only risk is in leaving + * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ + + mtx_lock(&dev_priv->rps_lock); + dev_priv->pm_iir = 0; + mtx_unlock(&dev_priv->rps_lock); + + I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); +} + +int intel_enable_rc6(const struct drm_device *dev) +{ + /* + * Respect the kernel parameter if it is set + */ + if (i915_enable_rc6 >= 0) + return i915_enable_rc6; + + /* + * Disable RC6 on Ironlake + */ + if (INTEL_INFO(dev)->gen == 5) + return 0; + + /* Sorry Haswell, no RC6 for you for now. */ + if (IS_HASWELL(dev)) + return 0; + + /* + * Disable rc6 on Sandybridge + */ + if (INTEL_INFO(dev)->gen == 6) { + DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); + return INTEL_RC6_ENABLE; + } + DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); + return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); +} + +void gen6_enable_rps(struct drm_i915_private *dev_priv) +{ + struct intel_ring_buffer *ring; + u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); + u32 pcu_mbox, rc6_mask = 0; + u32 gtfifodbg; + int cur_freq, min_freq, max_freq; + int rc6_mode; + int i; + + /* Here begins a magic sequence of register writes to enable + * auto-downclocking. + * + * Perhaps there might be some value in exposing these to + * userspace... 
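+	 *
+	 * Rough order of operations below: clear any stale GTFIFODBG error,
+	 * grab forcewake, program the RC wake-rate limits and thresholds,
+	 * build the RC6 enable mask from intel_enable_rc6(), set up the RPS
+	 * request and up/down thresholds, read the min/max/current frequency
+	 * from RP_STATE_CAP, GT_PERF_STATUS and the pcode mailbox, and
+	 * finally unmask the PM interrupts.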
+ */ + I915_WRITE(GEN6_RC_STATE, 0); + DRM_LOCK(dev_priv->dev); + + /* Clear the DBG now so we don't confuse earlier errors */ + if ((gtfifodbg = I915_READ(GTFIFODBG))) { + DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); + I915_WRITE(GTFIFODBG, gtfifodbg); + } + + gen6_gt_force_wake_get(dev_priv); + + /* disable the counters and set deterministic thresholds */ + I915_WRITE(GEN6_RC_CONTROL, 0); + + I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); + I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); + I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); + I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); + I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); + + for_each_ring(ring, dev_priv, i) + I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + + I915_WRITE(GEN6_RC_SLEEP, 0); + I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); + I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); + I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + + rc6_mode = intel_enable_rc6(dev_priv->dev); + if (rc6_mode & INTEL_RC6_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; + + if (rc6_mode & INTEL_RC6p_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; + + if (rc6_mode & INTEL_RC6pp_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + + DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", + (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", + (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", + (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off"); + + I915_WRITE(GEN6_RC_CONTROL, + rc6_mask | + GEN6_RC_CTL_EI_MODE(1) | + GEN6_RC_CTL_HW_ENABLE); + + I915_WRITE(GEN6_RPNSWREQ, + GEN6_FREQUENCY(10) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + I915_WRITE(GEN6_RC_VIDEO_FREQ, + GEN6_FREQUENCY(12)); + + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + 18 << 24 | + 6 << 16); + I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); + I915_WRITE(GEN6_RP_UP_EI, 100000); + I915_WRITE(GEN6_RP_DOWN_EI, 5000000); + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_CONT); + + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + + I915_WRITE(GEN6_PCODE_DATA, 0); + I915_WRITE(GEN6_PCODE_MAILBOX, + GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + + min_freq = (rp_state_cap & 0xff0000) >> 16; + max_freq = rp_state_cap & 0xff; + cur_freq = (gt_perf_status & 0xff00) >> 8; + + /* Check for overclock support */ + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); + pcu_mbox = I915_READ(GEN6_PCODE_DATA); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + if (pcu_mbox & (1<<31)) { /* OC supported */ + max_freq = pcu_mbox & 0xff; + DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); + } + + /* In units of 100MHz */ + dev_priv->max_delay = max_freq; + dev_priv->min_delay = min_freq; + dev_priv->cur_delay = cur_freq; + + /* requires MSI enabled */ + I915_WRITE(GEN6_PMIER, + GEN6_PM_MBOX_EVENT | + 
GEN6_PM_THERMAL_EVENT | + GEN6_PM_RP_DOWN_TIMEOUT | + GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_UP_EI_EXPIRED | + GEN6_PM_RP_DOWN_EI_EXPIRED); + mtx_lock(&dev_priv->rps_lock); + if (dev_priv->pm_iir != 0) + printf("KMS: pm_iir %x\n", dev_priv->pm_iir); + I915_WRITE(GEN6_PMIMR, 0); + mtx_unlock(&dev_priv->rps_lock); + /* enable all PM interrupts */ + I915_WRITE(GEN6_PMINTRMSK, 0); + + gen6_gt_force_wake_put(dev_priv); + DRM_UNLOCK(dev_priv->dev); +} + +void gen6_update_ring_freq(struct drm_i915_private *dev_priv) +{ + int min_freq = 15; + int gpu_freq, ia_freq, max_ia_freq; + int scaling_factor = 180; + uint64_t tsc_freq; + +#if 0 + max_ia_freq = cpufreq_quick_get_max(0); + /* + * Default to measured freq if none found, PCU will ensure we don't go + * over + */ + if (!max_ia_freq) + max_ia_freq = tsc_khz; + + /* Convert from kHz to MHz */ + max_ia_freq /= 1000; +#else + tsc_freq = atomic_load_acq_64(&tsc_freq); + max_ia_freq = tsc_freq / 1000 / 1000; +#endif + + DRM_LOCK(dev_priv->dev); + + /* + * For each potential GPU frequency, load a ring frequency we'd like + * to use for memory access. We do this by specifying the IA frequency + * the PCU should use as a reference to determine the ring frequency. + */ + for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; + gpu_freq--) { + int diff = dev_priv->max_delay - gpu_freq; + int d; + + /* + * For GPU frequencies less than 750MHz, just use the lowest + * ring freq. + */ + if (gpu_freq < min_freq) + ia_freq = 800; + else + ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); + d = 100; + ia_freq = (ia_freq + d / 2) / d; + + I915_WRITE(GEN6_PCODE_DATA, + (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | + gpu_freq); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & + GEN6_PCODE_READY) == 0, 10)) { + DRM_ERROR("pcode write of freq table timed out\n"); + continue; + } + } + + DRM_UNLOCK(dev_priv->dev); +} + +static void ironlake_teardown_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->renderctx) { + i915_gem_object_unpin(dev_priv->renderctx); + drm_gem_object_unreference(&dev_priv->renderctx->base); + dev_priv->renderctx = NULL; + } + + if (dev_priv->pwrctx) { + i915_gem_object_unpin(dev_priv->pwrctx); + drm_gem_object_unreference(&dev_priv->pwrctx->base); + dev_priv->pwrctx = NULL; + } +} + +void ironlake_disable_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (I915_READ(PWRCTXA)) { + /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); + wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), + 50); + + I915_WRITE(PWRCTXA, 0); + POSTING_READ(PWRCTXA); + + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); + POSTING_READ(RSTDBYCTL); + } + + ironlake_teardown_rc6(dev); +} + +static int ironlake_setup_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->renderctx == NULL) + dev_priv->renderctx = intel_alloc_context_page(dev); + if (!dev_priv->renderctx) + return -ENOMEM; + + if (dev_priv->pwrctx == NULL) + dev_priv->pwrctx = intel_alloc_context_page(dev); + if (!dev_priv->pwrctx) { + ironlake_teardown_rc6(dev); + return -ENOMEM; + } + + return 0; +} + +void ironlake_enable_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer 
*ring = &dev_priv->rings[RCS]; + int ret; + + /* rc6 disabled by default due to repeated reports of hanging during + * boot and resume. + */ + if (!intel_enable_rc6(dev)) + return; + + DRM_LOCK(dev); + ret = ironlake_setup_rc6(dev); + if (ret) { + DRM_UNLOCK(dev); + return; + } + + /* + * GPU can automatically power down the render unit if given a page + * to save state. + */ + ret = intel_ring_begin(ring, 6); + if (ret) { + ironlake_teardown_rc6(dev); + DRM_UNLOCK(dev); + return; + } + + intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); + intel_ring_emit(ring, MI_SET_CONTEXT); + intel_ring_emit(ring, dev_priv->renderctx->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); + intel_ring_emit(ring, MI_SUSPEND_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_FLUSH); + intel_ring_advance(ring); + + /* + * Wait for the command parser to advance past MI_SET_CONTEXT. The HW + * does an implicit flush, combined with MI_FLUSH above, it should be + * safe to assume that renderctx is valid + */ + ret = intel_wait_ring_idle(ring); + if (ret) { + DRM_ERROR("failed to enable ironlake power power savings\n"); + ironlake_teardown_rc6(dev); + DRM_UNLOCK(dev); + return; + } + + I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); + DRM_UNLOCK(dev); +} + +static unsigned long intel_pxfreq(u32 vidfreq) +{ + unsigned long freq; + int div = (vidfreq & 0x3f0000) >> 16; + int post = (vidfreq & 0x3000) >> 12; + int pre = (vidfreq & 0x7); + + if (!pre) + return 0; + + freq = ((div * 133333) / ((1<last_time1; + /* + * sysctl(8) reads the value of sysctl twice in rapid + * succession. There is high chance that it happens in the + * same timer tick. Use the cached value to not divide by + * zero and give the hw a chance to gather more samples. 
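+	 *
+	 * Otherwise the estimate below sums the DMIEC, DDREC and CSIEC
+	 * energy counters, takes the delta since the last sample, divides
+	 * it by the elapsed time and scales it with the m/c constants of
+	 * the cparams[] entry matching dev_priv->c_m and r_t.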
+ */ + if (diff1 <= 10) + return (dev_priv->chipset_power); + + count1 = I915_READ(DMIEC); + count2 = I915_READ(DDREC); + count3 = I915_READ(CSIEC); + + total_count = count1 + count2 + count3; + + /* FIXME: handle per-counter overflow */ + if (total_count < dev_priv->last_count1) { + diff = ~0UL - dev_priv->last_count1; + diff += total_count; + } else { + diff = total_count - dev_priv->last_count1; + } + + for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) { + if (cparams[i].i == dev_priv->c_m && + cparams[i].t == dev_priv->r_t) { + m = cparams[i].m; + c = cparams[i].c; + break; + } + } + + diff = diff / diff1; + ret = ((m * diff) + c); + ret = ret / 10; + + dev_priv->last_count1 = total_count; + dev_priv->last_time1 = now; + + dev_priv->chipset_power = ret; + return (ret); +} + +unsigned long i915_mch_val(struct drm_i915_private *dev_priv) +{ + unsigned long m, x, b; + u32 tsfs; + + tsfs = I915_READ(TSFS); + + m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); + x = I915_READ8(I915_TR1); + + b = tsfs & TSFS_INTR_MASK; + + return ((m * x) / 127) - b; +} + +static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) +{ + static const struct v_table { + u16 vd; /* in .1 mil */ + u16 vm; /* in .1 mil */ + } v_table[] = { + { 0, 0, }, + { 375, 0, }, + { 500, 0, }, + { 625, 0, }, + { 750, 0, }, + { 875, 0, }, + { 1000, 0, }, + { 1125, 0, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4250, 3125, }, + { 4375, 3250, }, + { 4500, 3375, }, + { 4625, 3500, }, + { 4750, 3625, }, + { 4875, 3750, }, + { 5000, 3875, }, + { 5125, 4000, }, + { 5250, 4125, }, + { 5375, 4250, }, + { 5500, 4375, }, + { 5625, 4500, }, + { 5750, 4625, }, + { 5875, 4750, }, + { 6000, 4875, }, + { 6125, 5000, }, + { 6250, 5125, }, + { 6375, 5250, }, + { 6500, 5375, }, + { 6625, 5500, }, + { 6750, 5625, }, + { 6875, 5750, }, + { 7000, 5875, }, + { 7125, 6000, }, + { 7250, 6125, }, + { 7375, 6250, }, + { 7500, 6375, }, + { 7625, 6500, }, + { 7750, 6625, }, + { 7875, 6750, }, + { 8000, 6875, }, + { 8125, 7000, }, + { 8250, 7125, }, + { 8375, 7250, }, + { 8500, 7375, }, + { 8625, 7500, }, + { 8750, 7625, }, + { 8875, 7750, }, + { 9000, 7875, }, + { 9125, 8000, }, + { 9250, 8125, }, + { 9375, 8250, }, + { 9500, 8375, }, + { 9625, 8500, }, + { 9750, 8625, }, + { 9875, 8750, }, + { 10000, 8875, }, + { 10125, 9000, }, + { 10250, 9125, }, + { 10375, 9250, }, + { 10500, 9375, }, + { 10625, 9500, }, + { 10750, 9625, }, + { 10875, 9750, }, + { 11000, 9875, }, + { 11125, 10000, }, + { 11250, 10125, }, + { 11375, 10250, }, + { 11500, 10375, }, + { 11625, 10500, }, + { 11750, 10625, }, + { 11875, 10750, }, + { 12000, 10875, }, + { 12125, 11000, }, + { 12250, 11125, }, + { 12375, 11250, }, + { 12500, 11375, }, + { 12625, 11500, }, + { 12750, 11625, }, + { 12875, 11750, }, + { 13000, 11875, }, + { 13125, 12000, }, + { 13250, 12125, }, + { 13375, 12250, }, + { 13500, 12375, }, + { 13625, 12500, }, + { 13750, 12625, }, + { 13875, 12750, }, + { 14000, 12875, }, + { 14125, 13000, }, + { 14250, 13125, }, + { 14375, 13250, }, + { 14500, 13375, }, + { 14625, 13500, }, + { 14750, 13625, }, + { 14875, 13750, }, + { 15000, 13875, }, + { 
15125, 14000, }, + { 15250, 14125, }, + { 15375, 14250, }, + { 15500, 14375, }, + { 15625, 14500, }, + { 15750, 14625, }, + { 15875, 14750, }, + { 16000, 14875, }, + { 16125, 15000, }, + }; + if (dev_priv->info->is_mobile) + return v_table[pxvid].vm; + else + return v_table[pxvid].vd; +} + +void i915_update_gfx_val(struct drm_i915_private *dev_priv) +{ + struct timespec now, diff1; + u64 diff; + unsigned long diffms; + u32 count; + + if (dev_priv->info->gen != 5) + return; + + nanotime(&now); + diff1 = now; + timespecsub(&diff1, &dev_priv->last_time2); + + /* Don't divide by 0 */ + diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; + if (!diffms) + return; + + count = I915_READ(GFXEC); + + if (count < dev_priv->last_count2) { + diff = ~0UL - dev_priv->last_count2; + diff += count; + } else { + diff = count - dev_priv->last_count2; + } + + dev_priv->last_count2 = count; + dev_priv->last_time2 = now; + + /* More magic constants... */ + diff = diff * 1181; + diff = diff / (diffms * 10); + dev_priv->gfx_power = diff; +} + +unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) +{ + unsigned long t, corr, state1, corr2, state2; + u32 pxvid, ext_v; + + pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); + pxvid = (pxvid >> 24) & 0x7f; + ext_v = pvid_to_extvid(dev_priv, pxvid); + + state1 = ext_v; + + t = i915_mch_val(dev_priv); + + /* Revel in the empirically derived constants */ + + /* Correction factor in 1/100000 units */ + if (t > 80) + corr = ((t * 2349) + 135940); + else if (t >= 50) + corr = ((t * 964) + 29317); + else /* < 50 */ + corr = ((t * 301) + 1004); + + corr = corr * ((150142 * state1) / 10000 - 78642); + corr /= 100000; + corr2 = (corr * dev_priv->corr); + + state2 = (corr2 * state1) / 10000; + state2 /= 100; /* convert to mW */ + + i915_update_gfx_val(dev_priv); + + return dev_priv->gfx_power + state2; +} + +/** + * i915_read_mch_val - return value for IPS use + * + * Calculate and return a value for the IPS driver to use when deciding whether + * we have thermal and power headroom to increase CPU or GPU power budget. + */ +unsigned long i915_read_mch_val(void) +{ + struct drm_i915_private *dev_priv; + unsigned long chipset_val, graphics_val, ret = 0; + + mtx_lock(&mchdev_lock); + if (!i915_mch_dev) + goto out_unlock; + dev_priv = i915_mch_dev; + + chipset_val = i915_chipset_val(dev_priv); + graphics_val = i915_gfx_val(dev_priv); + + ret = chipset_val + graphics_val; + +out_unlock: + mtx_unlock(&mchdev_lock); + + return ret; +} + +/** + * i915_gpu_raise - raise GPU frequency limit + * + * Raise the limit; IPS indicates we have thermal headroom. + */ +bool i915_gpu_raise(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + mtx_lock(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + if (dev_priv->max_delay > dev_priv->fmax) + dev_priv->max_delay--; + +out_unlock: + mtx_unlock(&mchdev_lock); + + return ret; +} + +/** + * i915_gpu_lower - lower GPU frequency limit + * + * IPS indicates we're close to a thermal limit, so throttle back the GPU + * frequency maximum. 
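+ *
+ * Note that the delay values run opposite to frequency on this hardware:
+ * a smaller max_delay means a higher permitted frequency, so lowering the
+ * limit increments max_delay toward min_delay, just as i915_gpu_raise()
+ * decrements it toward fmax.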
+ */ +bool i915_gpu_lower(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + mtx_lock(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + if (dev_priv->max_delay < dev_priv->min_delay) + dev_priv->max_delay++; + +out_unlock: + mtx_unlock(&mchdev_lock); + + return ret; +} + +/** + * i915_gpu_busy - indicate GPU business to IPS + * + * Tell the IPS driver whether or not the GPU is busy. + */ +bool i915_gpu_busy(void) +{ + struct drm_i915_private *dev_priv; + bool ret = false; + + mtx_lock(&mchdev_lock); + if (!i915_mch_dev) + goto out_unlock; + dev_priv = i915_mch_dev; + + ret = dev_priv->busy; + +out_unlock: + mtx_unlock(&mchdev_lock); + + return ret; +} + +/** + * i915_gpu_turbo_disable - disable graphics turbo + * + * Disable graphics turbo by resetting the max frequency and setting the + * current frequency to the default. + */ +bool i915_gpu_turbo_disable(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + mtx_lock(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + dev_priv->max_delay = dev_priv->fstart; + + if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) + ret = false; + +out_unlock: + mtx_unlock(&mchdev_lock); + + return ret; +} + +void intel_gpu_ips_init(struct drm_i915_private *dev_priv) +{ + mtx_lock(&mchdev_lock); + i915_mch_dev = dev_priv; + dev_priv->mchdev_lock = &mchdev_lock; + mtx_unlock(&mchdev_lock); + +#if 0 + ips_ping_for_i915_load(); +#endif +} + +void intel_gpu_ips_teardown(void) +{ + mtx_lock(&mchdev_lock); + i915_mch_dev = NULL; + mtx_unlock(&mchdev_lock); +} + +void intel_init_emon(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 lcfuse; + u8 pxw[16]; + int i; + + /* Disable to program */ + I915_WRITE(ECR, 0); + POSTING_READ(ECR); + + /* Program energy weights for various events */ + I915_WRITE(SDEW, 0x15040d00); + I915_WRITE(CSIEW0, 0x007f0000); + I915_WRITE(CSIEW1, 0x1e220004); + I915_WRITE(CSIEW2, 0x04000004); + + for (i = 0; i < 5; i++) + I915_WRITE(PEW + (i * 4), 0); + for (i = 0; i < 3; i++) + I915_WRITE(DEW + (i * 4), 0); + + /* Program P-state weights to account for frequency power adjustment */ + for (i = 0; i < 16; i++) { + u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); + unsigned long freq = intel_pxfreq(pxvidfreq); + unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + unsigned long val; + + val = vid * vid; + val *= (freq / 1000); + val *= 255; + val /= (127*127*900); + if (val > 0xff) + DRM_ERROR("bad pxval: %ld\n", val); + pxw[i] = val; + } + /* Render standby states get 0 weight */ + pxw[14] = 0; + pxw[15] = 0; + + for (i = 0; i < 4; i++) { + u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | + (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); + I915_WRITE(PXW + (i * 4), val); + } + + /* Adjust magic regs to magic values (more experimental results) */ + I915_WRITE(OGW0, 0); + I915_WRITE(OGW1, 0); + I915_WRITE(EG0, 0x00007f00); + I915_WRITE(EG1, 0x0000000e); + I915_WRITE(EG2, 0x000e0000); + I915_WRITE(EG3, 0x68000300); + I915_WRITE(EG4, 0x42000000); + I915_WRITE(EG5, 0x00140031); + I915_WRITE(EG6, 0); + I915_WRITE(EG7, 0); + + for (i = 0; i < 8; i++) + I915_WRITE(PXWL + (i * 4), 0); + + /* Enable PMON + select events */ + I915_WRITE(ECR, 0x80000019); + + lcfuse = I915_READ(LCFUSE02); + + dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); +} + +static void ironlake_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = 
dev->dev_private; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + /* Required for FBC */ + dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | + DPFCRUNIT_CLOCK_GATE_DISABLE | + DPFDUNIT_CLOCK_GATE_DISABLE; + /* Required for CxSR */ + dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_3DCGDIS0, + MARIUNIT_CLOCK_GATE_DISABLE | + SVSMUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(PCH_3DCGDIS1, + VFMUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + /* + * According to the spec the following bits should be set in + * order to enable memory self-refresh + * The bit 22/21 of 0x42004 + * The bit 5 of 0x42020 + * The bit 15 of 0x45000 + */ + I915_WRITE(ILK_DISPLAY_CHICKEN2, + (I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL)); + I915_WRITE(ILK_DSPCLK_GATE, + (I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE)); + I915_WRITE(DISP_ARB_CTL, + (I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS)); + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* + * Based on the document from hardware guys the following bits + * should be set unconditionally in order to enable FBC. + * The bit 22 of 0x42000 + * The bit 22 of 0x42004 + * The bit 7,8,9 of 0x42020. + */ + if (IS_IRONLAKE_M(dev)) { + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPFC_DIS1 | + ILK_DPFC_DIS2 | + ILK_CLK_FBC); + } + + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_ELPIN_409_SELECT); + I915_WRITE(_3D_CHICKEN2, + _3D_CHICKEN2_WM_READ_PIPELINED << 16 | + _3D_CHICKEN2_WM_READ_PIPELINED); +} + +static void gen6_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_ELPIN_409_SELECT); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + I915_WRITE(CACHE_MODE_0, + _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); + + I915_WRITE(GEN6_UCGCTL1, + I915_READ(GEN6_UCGCTL1) | + GEN6_BLBUNIT_CLOCK_GATE_DISABLE | + GEN6_CSUNIT_CLOCK_GATE_DISABLE); + + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. + * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. + */ + I915_WRITE(GEN6_UCGCTL2, + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + + /* Bspec says we need to always set all mask bits. 
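+	 *
+	 * (_3D_CHICKEN is a masked register: the high 16 bits select which
+	 * of the low 16 bits the write below actually updates, so
+	 * 0xFFFF << 16 makes every bit writable.)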
*/ + I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) | + _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL); + + /* + * According to the spec the following bits should be + * set in order to enable memory self-refresh and fbc: + * The bit21 and bit22 of 0x42000 + * The bit21 and bit22 of 0x42004 + * The bit5 and bit7 of 0x42020 + * The bit14 of 0x70180 + * The bit14 of 0x71180 + */ + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE | + ILK_DPFD_CLK_GATE); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } +} + +static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) +{ + uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); + + reg &= ~GEN7_FF_SCHED_MASK; + reg |= GEN7_FF_TS_SCHED_HW; + reg |= GEN7_FF_VS_SCHED_HW; + reg |= GEN7_FF_DS_SCHED_HW; + + I915_WRITE(GEN7_FF_THREAD_MODE, reg); +} + +static void ivybridge_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + */ + I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, + GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, + GEN7_WA_L3_CHICKEN_MODE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + gen7_setup_fixed_func_scheduler(dev_priv); + + /* WaDisable4x2SubspanOptimization */ + I915_WRITE(CACHE_MODE_1, + _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); +} + +static void valleyview_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + */ + I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. 
*/ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + I915_WRITE(CACHE_MODE_1, + _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); +} + +static void g4x_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dspclk_gate; + + I915_WRITE(RENCLK_GATE_D1, 0); + I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | + GS_UNIT_CLOCK_GATE_DISABLE | + CL_UNIT_CLOCK_GATE_DISABLE); + I915_WRITE(RAMCLK_GATE_D, 0); + dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | + OVRUNIT_CLOCK_GATE_DISABLE | + OVCUNIT_CLOCK_GATE_DISABLE; + if (IS_GM45(dev)) + dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, dspclk_gate); +} + +static void crestline_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); + I915_WRITE(RENCLK_GATE_D2, 0); + I915_WRITE(DSPCLK_GATE_D, 0); + I915_WRITE(RAMCLK_GATE_D, 0); + I915_WRITE16(DEUC, 0); +} + +static void broadwater_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | + I965_RCC_CLOCK_GATE_DISABLE | + I965_RCPB_CLOCK_GATE_DISABLE | + I965_ISC_CLOCK_GATE_DISABLE | + I965_FBC_CLOCK_GATE_DISABLE); + I915_WRITE(RENCLK_GATE_D2, 0); +} + +static void gen3_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dstate = I915_READ(D_STATE); + + dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | + DSTATE_DOT_CLOCK_GATING; + I915_WRITE(D_STATE, dstate); + + if (IS_PINEVIEW(dev)) + I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); +} + +static void i85x_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); +} + +static void i830_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); +} + +static void ibx_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); +} + +static void cpt_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. 
+ */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | + DPLS_EDP_PPS_FIX_DIS); + /* Without this, mode sets may fail silently on FDI */ + for_each_pipe(pipe) + I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); +} + +void intel_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->display.init_clock_gating(dev); + + if (dev_priv->display.init_pch_clock_gating) + dev_priv->display.init_pch_clock_gating(dev); +} + +static void gen6_sanitize_pm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 limits, delay, old; + + gen6_gt_force_wake_get(dev_priv); + + old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS); + /* Make sure we continue to get interrupts + * until we hit the minimum or maximum frequencies. + */ + limits &= ~(0x3f << 16 | 0x3f << 24); + delay = dev_priv->cur_delay; + if (delay < dev_priv->max_delay) + limits |= (dev_priv->max_delay & 0x3f) << 24; + if (delay > dev_priv->min_delay) + limits |= (dev_priv->min_delay & 0x3f) << 16; + + if (old != limits) { + DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n", + limits, old); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); + } + + gen6_gt_force_wake_put(dev_priv); +} + +void intel_sanitize_pm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.sanitize_pm) + dev_priv->display.sanitize_pm(dev); +} + +/* Starting with Haswell, we have different power wells for + * different parts of the GPU. This attempts to enable them all. + */ +static void intel_init_power_wells(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long power_wells[] = { + HSW_PWR_WELL_CTL1, + HSW_PWR_WELL_CTL2, + HSW_PWR_WELL_CTL4 + }; + int i; + + if (!IS_HASWELL(dev)) + return; + + DRM_LOCK(dev); + + for (i = 0; i < DRM_ARRAY_SIZE(power_wells); i++) { + int well = I915_READ(power_wells[i]); + + if ((well & HSW_PWR_WELL_STATE) == 0) { + I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); + if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20)) + DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); + } + } + +printf("XXXKIB HACK: HSW RC OFF\n"); + I915_WRITE(GEN6_RC_STATE, 0); + I915_WRITE(GEN6_RC_CONTROL, 0); + DRM_UNLOCK(dev); +} + +/* Set up chip specific power management-related functions */ +void intel_init_pm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (I915_HAS_FBC(dev)) { + if (HAS_PCH_SPLIT(dev)) { + dev_priv->display.fbc_enabled = ironlake_fbc_enabled; + dev_priv->display.enable_fbc = ironlake_enable_fbc; + dev_priv->display.disable_fbc = ironlake_disable_fbc; + } else if (IS_GM45(dev)) { + dev_priv->display.fbc_enabled = g4x_fbc_enabled; + dev_priv->display.enable_fbc = g4x_enable_fbc; + dev_priv->display.disable_fbc = g4x_disable_fbc; + } else if (IS_CRESTLINE(dev)) { + dev_priv->display.fbc_enabled = i8xx_fbc_enabled; + dev_priv->display.enable_fbc = i8xx_enable_fbc; + dev_priv->display.disable_fbc = i8xx_disable_fbc; + } + /* 855GM needs testing */ + } + + /* For cxsr */ + if (IS_PINEVIEW(dev)) + i915_pineview_get_mem_freq(dev); + else if (IS_GEN5(dev)) + i915_ironlake_get_mem_freq(dev); + + /* For FIFO watermark updates */ + if (HAS_PCH_SPLIT(dev)) { + dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; + dev_priv->display.force_wake_put = 
__gen6_gt_force_wake_put; + + /* IVB configs may use multi-threaded forcewake */ + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + u32 ecobus; + + /* A small trick here - if the bios hasn't configured MT forcewake, + * and if the device is in RC6, then force_wake_mt_get will not wake + * the device and the ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT forcewake being + * disabled. + */ + DRM_LOCK(dev); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + DRM_UNLOCK(dev); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + DRM_DEBUG_KMS("Using MT version of forcewake\n"); + dev_priv->display.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->display.force_wake_put = + __gen6_gt_force_wake_mt_put; + } + } + + if (HAS_PCH_IBX(dev)) + dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; + else if (HAS_PCH_CPT(dev)) + dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; + + if (IS_GEN5(dev)) { + if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) + dev_priv->display.update_wm = ironlake_update_wm; + else { + DRM_DEBUG_KMS("Failed to get proper latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = ironlake_init_clock_gating; + } else if (IS_GEN6(dev)) { + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = gen6_init_clock_gating; + dev_priv->display.sanitize_pm = gen6_sanitize_pm; + } else if (IS_IVYBRIDGE(dev)) { + /* FIXME: detect B0+ stepping and use auto training */ + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; + dev_priv->display.sanitize_pm = gen6_sanitize_pm; + } else if (IS_HASWELL(dev)) { + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; + dev_priv->display.update_linetime_wm = haswell_update_linetime_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; + dev_priv->display.sanitize_pm = gen6_sanitize_pm; + } else + dev_priv->display.update_wm = NULL; + } else if (IS_VALLEYVIEW(dev)) { + dev_priv->display.update_wm = valleyview_update_wm; + dev_priv->display.init_clock_gating = + valleyview_init_clock_gating; + dev_priv->display.force_wake_get = vlv_force_wake_get; + dev_priv->display.force_wake_put = vlv_force_wake_put; + } else if (IS_PINEVIEW(dev)) { + if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), + dev_priv->is_ddr3, + dev_priv->fsb_freq, + dev_priv->mem_freq)) { + DRM_INFO("failed to find known CxSR latency " + "(found ddr%s fsb freq %d, mem freq %d), " + "disabling CxSR\n", + (dev_priv->is_ddr3 == 1) ? 
"3" : "2", + dev_priv->fsb_freq, dev_priv->mem_freq); + /* Disable CxSR and never update its watermark again */ + pineview_disable_cxsr(dev); + dev_priv->display.update_wm = NULL; + } else + dev_priv->display.update_wm = pineview_update_wm; + dev_priv->display.init_clock_gating = gen3_init_clock_gating; + } else if (IS_G4X(dev)) { + dev_priv->display.update_wm = g4x_update_wm; + dev_priv->display.init_clock_gating = g4x_init_clock_gating; + } else if (IS_GEN4(dev)) { + dev_priv->display.update_wm = i965_update_wm; + if (IS_CRESTLINE(dev)) + dev_priv->display.init_clock_gating = crestline_init_clock_gating; + else if (IS_BROADWATER(dev)) + dev_priv->display.init_clock_gating = broadwater_init_clock_gating; + } else if (IS_GEN3(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i9xx_get_fifo_size; + dev_priv->display.init_clock_gating = gen3_init_clock_gating; + } else if (IS_I865G(dev)) { + dev_priv->display.update_wm = i830_update_wm; + dev_priv->display.init_clock_gating = i85x_init_clock_gating; + dev_priv->display.get_fifo_size = i830_get_fifo_size; + } else if (IS_I85X(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i85x_get_fifo_size; + dev_priv->display.init_clock_gating = i85x_init_clock_gating; + } else { + dev_priv->display.update_wm = i830_update_wm; + dev_priv->display.init_clock_gating = i830_init_clock_gating; + if (IS_845G(dev)) + dev_priv->display.get_fifo_size = i845_get_fifo_size; + else + dev_priv->display.get_fifo_size = i830_get_fifo_size; + } + + /* We attempt to init the necessary power wells early in the initialization + * time, so the subsystems that expect power to be enabled can work. + */ + intel_init_power_wells(dev); +} + Property changes on: head/sys/dev/drm2/i915/intel_pm.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/drm2/i915/intel_ringbuffer.c =================================================================== --- head/sys/dev/drm2/i915/intel_ringbuffer.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_ringbuffer.c (revision 277487) @@ -1,1619 +1,1555 @@ /* * Copyright © 2008-2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
* * Authors: * Eric Anholt * Zou Nan hai * Xiang Hai hao * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include /* * 965+ support PIPE_CONTROL commands, which provide finer grained control * over cache flushing. */ struct pipe_control { struct drm_i915_gem_object *obj; volatile u32 *cpu_page; u32 gtt_offset; }; void i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno) { + struct drm_i915_private *dev_priv; if (ring->trace_irq_seqno == 0) { - mtx_lock(&ring->irq_lock); + dev_priv = ring->dev->dev_private; + mtx_lock(&dev_priv->irq_lock); if (ring->irq_get(ring)) ring->trace_irq_seqno = seqno; - mtx_unlock(&ring->irq_lock); + mtx_unlock(&dev_priv->irq_lock); } } static inline int ring_space(struct intel_ring_buffer *ring) { int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); if (space < 0) space += ring->size; return space; } static int -render_ring_flush(struct intel_ring_buffer *ring, - uint32_t invalidate_domains, - uint32_t flush_domains) +gen2_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { + u32 cmd; + int ret; + + cmd = MI_FLUSH; + if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) + cmd |= MI_NO_WRITE_FLUSH; + + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) + cmd |= MI_READ_FLUSH; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + +static int +gen4_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ struct drm_device *dev = ring->dev; uint32_t cmd; int ret; /* * read/write caches: * * I915_GEM_DOMAIN_RENDER is always invalidated, but is * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is * also flushed at 2d versus 3d pipeline switches. * * read-only caches: * * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if * MI_READ_FLUSH is set, and is always flushed on 965. * * I915_GEM_DOMAIN_COMMAND may not exist? * * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is * invalidated when MI_EXE_FLUSH is set. * * I915_GEM_DOMAIN_VERTEX, which exists on 965, is * invalidated with every MI_FLUSH. * * TLBs: * * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER * are flushed at any MI_FLUSH. */ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; - if ((invalidate_domains|flush_domains) & - I915_GEM_DOMAIN_RENDER) + if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) cmd &= ~MI_NO_WRITE_FLUSH; - if (INTEL_INFO(dev)->gen < 4) { - /* - * On the 965, the sampler cache always gets flushed - * and this bit is reserved. - */ - if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) - cmd |= MI_READ_FLUSH; - } if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) cmd |= MI_EXE_FLUSH; if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && (IS_G4X(dev) || IS_GEN5(dev))) cmd |= MI_INVALIDATE_ISP; ret = intel_ring_begin(ring, 2); if (ret) return ret; intel_ring_emit(ring, cmd); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } /** * Emits a PIPE_CONTROL with a non-zero post-sync operation, for * implementing two workarounds on gen6. 
From section 1.4.7.1 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: * * [DevSNB-C+{W/A}] Before any depth stall flush (including those * produced by non-pipelined state commands), software needs to first * send a PIPE_CONTROL with no bits set except Post-Sync Operation != * 0. * * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. * * And the workaround for these two requires this workaround first: * * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent * BEFORE the pipe-control with a post-sync op and no write-cache * flushes. * * And this last workaround is tricky because of the requirements on * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM * volume 2 part 1: * * "1 of the following must also be set: * - Render Target Cache Flush Enable ([12] of DW1) * - Depth Cache Flush Enable ([0] of DW1) * - Stall at Pixel Scoreboard ([1] of DW1) * - Depth Stall ([13] of DW1) * - Post-Sync Operation ([13] of DW1) * - Notify Enable ([8] of DW1)" * * The cache flushes require the workaround flush that triggered this * one, so we can't use it. Depth stall would trigger the same. * Post-sync nonzero is what triggered this second workaround, so we * can't use that one either. Notify enable is IRQs, which aren't * really our business. That leaves only stall at scoreboard. */ static int intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) { struct pipe_control *pc = ring->private; u32 scratch_addr = pc->gtt_offset + 128; int ret; ret = intel_ring_begin(ring, 6); if (ret) return ret; intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD); intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ intel_ring_emit(ring, 0); /* low dword */ intel_ring_emit(ring, 0); /* high dword */ intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); ret = intel_ring_begin(ring, 6); if (ret) return ret; intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } static int gen6_render_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { u32 flags = 0; struct pipe_control *pc = ring->private; u32 scratch_addr = pc->gtt_offset + 128; int ret; /* Force SNB workarounds for PIPE_CONTROL flushes */ intel_emit_post_sync_nonzero_flush(ring); /* Just flush everything. Experiments have shown that reducing the * number of bits based on the write domains has little performance * impact. 
*/ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; ret = intel_ring_begin(ring, 6); if (ret) return ret; intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); intel_ring_emit(ring, flags); intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, 0); /* lower dword */ intel_ring_emit(ring, 0); /* uppwer dword */ intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } static void ring_write_tail(struct intel_ring_buffer *ring, uint32_t value) { drm_i915_private_t *dev_priv = ring->dev->dev_private; I915_WRITE_TAIL(ring, value); } u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = ring->dev->dev_private; uint32_t acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? RING_ACTHD(ring->mmio_base) : ACTHD; return I915_READ(acthd_reg); } static int init_ring_common(struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = ring->dev->dev_private; struct drm_i915_gem_object *obj = ring->obj; uint32_t head; /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); /* Initialize the ring. */ I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ if (head != 0) { DRM_DEBUG("%s head not reset to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, I915_READ_CTL(ring), I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); I915_WRITE_HEAD(ring, 0); if (I915_READ_HEAD(ring) & HEAD_ADDR) { DRM_ERROR("failed to set %s head to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, I915_READ_CTL(ring), I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); } } I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); /* If the head is still not zero, the ring is dead */ if (_intel_wait_for(ring->dev, (I915_READ_CTL(ring) & RING_VALID) != 0 && I915_READ_START(ring) == obj->gtt_offset && (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50, 1, "915rii")) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", ring->name, I915_READ_CTL(ring), I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); return -EIO; } if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) i915_kernel_lost_context(ring->dev); else { ring->head = I915_READ_HEAD(ring); ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring_space(ring); } return 0; } static int init_pipe_control(struct intel_ring_buffer *ring) { struct pipe_control *pc; struct drm_i915_gem_object *obj; int ret; if (ring->private) return 0; pc = malloc(sizeof(*pc), DRM_I915_GEM, M_WAITOK); if (!pc) return -ENOMEM; obj = i915_gem_alloc_object(ring->dev, 4096); if (obj == NULL) { DRM_ERROR("Failed to allocate seqno page\n"); ret = -ENOMEM; goto err; } i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); ret = i915_gem_object_pin(obj, 4096, true); if (ret) goto err_unref; pc->gtt_offset = obj->gtt_offset; pc->cpu_page = (uint32_t *)kva_alloc(PAGE_SIZE); if (pc->cpu_page == NULL) goto err_unpin; pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1); pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page, (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE); pc->obj = obj; 
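The flush paths above (gen6_render_ring_flush and the post-sync workaround before it) all follow the same discipline: reserve a fixed number of dwords with intel_ring_begin(), write them with intel_ring_emit(), then publish the new tail with intel_ring_advance(). The stand-alone sketch below models that reserve/emit/advance shape in plain C; every name in it (mock_ring, MOCK_MI_*, and so on) is invented for illustration and is not driver API, and the real intel_ring_begin() counts bytes, waits for space and handles wrap-around instead of simply failing.

#include <stdint.h>
#include <stdio.h>

#define MOCK_RING_DWORDS 1024
#define MOCK_MI_FLUSH    0x04000000u   /* placeholder values, not hardware opcodes */
#define MOCK_MI_NOOP     0x00000000u

struct mock_ring {
    uint32_t buf[MOCK_RING_DWORDS];
    unsigned int tail;    /* next dword slot to fill */
    unsigned int space;   /* dwords still free */
};

static int mock_ring_begin(struct mock_ring *r, unsigned int ndwords)
{
    /* The driver would wait for space or wrap; the model just fails. */
    if (r->space < ndwords)
        return -1;
    r->space -= ndwords;
    return 0;
}

static void mock_ring_emit(struct mock_ring *r, uint32_t dword)
{
    r->buf[r->tail] = dword;
    r->tail = (r->tail + 1) % MOCK_RING_DWORDS;
}

static void mock_ring_advance(struct mock_ring *r)
{
    /* Stand-in for writing the tail register so the GPU sees the commands. */
    printf("tail now at dword %u\n", r->tail);
}

int main(void)
{
    struct mock_ring r = { .tail = 0, .space = MOCK_RING_DWORDS };

    /* Same shape as the two-dword flushes above: reserve, emit, advance. */
    if (mock_ring_begin(&r, 2) == 0) {
        mock_ring_emit(&r, MOCK_MI_FLUSH);
        mock_ring_emit(&r, MOCK_MI_NOOP);
        mock_ring_advance(&r);
    }
    return 0;
}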
ring->private = pc; return 0; err_unpin: i915_gem_object_unpin(obj); err_unref: drm_gem_object_unreference(&obj->base); err: free(pc, DRM_I915_GEM); return ret; } static void cleanup_pipe_control(struct intel_ring_buffer *ring) { struct pipe_control *pc = ring->private; struct drm_i915_gem_object *obj; if (!ring->private) return; obj = pc->obj; pmap_qremove((vm_offset_t)pc->cpu_page, 1); kva_free((uintptr_t)pc->cpu_page, PAGE_SIZE); i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); free(pc, DRM_I915_GEM); ring->private = NULL; } static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret = init_ring_common(ring); if (INTEL_INFO(dev)->gen > 3) { - int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; - I915_WRITE(MI_MODE, mode); + I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); if (IS_GEN7(dev)) I915_WRITE(GFX_MODE_GEN7, - GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | - GFX_MODE_ENABLE(GFX_REPLAY_MODE)); + _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | + _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); } if (INTEL_INFO(dev)->gen >= 5) { ret = init_pipe_control(ring); if (ret) return ret; } if (IS_GEN6(dev)) { /* From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement * policy. [...] This bit must be reset. LRA replacement * policy is not supported." */ I915_WRITE(CACHE_MODE_0, - CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); + _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); /* This is not explicitly set for GEN6, so read the register. * see intel_ring_mi_set_context() for why we care. * TODO: consider explicitly setting the bit for GEN5 */ ring->itlb_before_ctx_switch = !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); } - if (INTEL_INFO(dev)->gen >= 6) { - I915_WRITE(INSTPM, - INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING); - } + if (INTEL_INFO(dev)->gen >= 6) + I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); return ret; } static void render_ring_cleanup(struct intel_ring_buffer *ring) { if (!ring->private) return; cleanup_pipe_control(ring); } static void update_mboxes(struct intel_ring_buffer *ring, u32 seqno, u32 mmio_offset) { intel_ring_emit(ring, MI_SEMAPHORE_MBOX | MI_SEMAPHORE_GLOBAL_GTT | MI_SEMAPHORE_REGISTER | MI_SEMAPHORE_UPDATE); intel_ring_emit(ring, seqno); intel_ring_emit(ring, mmio_offset); } /** * gen6_add_request - Update the semaphore mailbox registers * * @ring - ring that is adding a request * @seqno - return seqno stuck into the ring * * Update the mailbox registers in the *other* rings with the current seqno. * This acts like a signal in the canonical semaphore. 
*/ static int gen6_add_request(struct intel_ring_buffer *ring, u32 *seqno) { u32 mbox1_reg; u32 mbox2_reg; int ret; ret = intel_ring_begin(ring, 10); if (ret) return ret; mbox1_reg = ring->signal_mbox[0]; mbox2_reg = ring->signal_mbox[1]; *seqno = i915_gem_next_request_seqno(ring); update_mboxes(ring, *seqno, mbox1_reg); update_mboxes(ring, *seqno, mbox2_reg); intel_ring_emit(ring, MI_STORE_DWORD_INDEX); intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); intel_ring_emit(ring, *seqno); intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_advance(ring); return 0; } /** * intel_ring_sync - sync the waiter to the signaller on seqno * * @waiter - ring that is waiting * @signaller - ring which has, or will signal * @seqno - seqno which the waiter will block on */ static int -intel_ring_sync(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, - int ring, - u32 seqno) +gen6_ring_sync(struct intel_ring_buffer *waiter, + struct intel_ring_buffer *signaller, + u32 seqno) { int ret; u32 dw1 = MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER; + /* Throughout all of the GEM code, seqno passed implies our current + * seqno is >= the last seqno executed. However for hardware the + * comparison is strictly greater than. + */ + seqno -= 1; + + if (signaller->semaphore_register[waiter->id] == + MI_SEMAPHORE_SYNC_INVALID) + printf("gen6_ring_sync semaphore_register %d invalid\n", + waiter->id); + ret = intel_ring_begin(waiter, 4); if (ret) return ret; - intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]); + intel_ring_emit(waiter, + dw1 | signaller->semaphore_register[waiter->id]); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, 0); intel_ring_emit(waiter, MI_NOOP); intel_ring_advance(waiter); return 0; } int render_ring_sync_to(struct intel_ring_buffer *waiter, struct intel_ring_buffer *signaller, u32 seqno); int gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter, struct intel_ring_buffer *signaller, u32 seqno); int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter, struct intel_ring_buffer *signaller, u32 seqno); -/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */ -int -render_ring_sync_to(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, - u32 seqno) -{ - KASSERT(signaller->semaphore_register[RCS] != MI_SEMAPHORE_SYNC_INVALID, - ("valid RCS semaphore")); - return intel_ring_sync(waiter, - signaller, - RCS, - seqno); -} - -/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */ -int -gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, - u32 seqno) -{ - KASSERT(signaller->semaphore_register[VCS] != MI_SEMAPHORE_SYNC_INVALID, - ("Valid VCS semaphore")); - return intel_ring_sync(waiter, - signaller, - VCS, - seqno); -} - -/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */ -int -gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, - u32 seqno) -{ - KASSERT(signaller->semaphore_register[BCS] != MI_SEMAPHORE_SYNC_INVALID, - ("Valid BCS semaphore")); - return intel_ring_sync(waiter, - signaller, - BCS, - seqno); -} - #define PIPE_CONTROL_FLUSH(ring__, addr__) \ do { \ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ PIPE_CONTROL_DEPTH_STALL); \ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ intel_ring_emit(ring__, 0); \ intel_ring_emit(ring__, 0); \ } while (0) static int pc_render_add_request(struct intel_ring_buffer *ring, uint32_t *result) { u32 seqno = i915_gem_next_request_seqno(ring); struct 
pipe_control *pc = ring->private; u32 scratch_addr = pc->gtt_offset + 128; int ret; /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently * incoherent with writes to memory, i.e. completely fubar, * so we need to use PIPE_NOTIFY instead. * * However, we also need to workaround the qword write * incoherence by flushing the 6 PIPE_NOTIFY buffers out to * memory before requesting an interrupt. */ ret = intel_ring_begin(ring, 32); if (ret) return ret; intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, seqno); intel_ring_emit(ring, 0); PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; /* write to separate cachelines */ PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; PIPE_CONTROL_FLUSH(ring, scratch_addr); intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | PIPE_CONTROL_NOTIFY); intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, seqno); intel_ring_emit(ring, 0); intel_ring_advance(ring); *result = seqno; return 0; } -static int -render_ring_add_request(struct intel_ring_buffer *ring, - uint32_t *result) -{ - u32 seqno = i915_gem_next_request_seqno(ring); - int ret; - - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; - - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, MI_USER_INTERRUPT); - intel_ring_advance(ring); - - *result = seqno; - return 0; -} - - static u32 +static u32 gen6_ring_get_seqno(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like * ACTHD) before reading the status page. 
*/ if (/* IS_GEN6(dev) || */IS_GEN7(dev)) intel_ring_get_active_head(ring); return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static uint32_t ring_get_seqno(struct intel_ring_buffer *ring) { if (ring->status_page.page_addr == NULL) return (-1); return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static uint32_t pc_render_get_seqno(struct intel_ring_buffer *ring) { struct pipe_control *pc = ring->private; if (pc != NULL) return pc->cpu_page[0]; else return (-1); } -static void -ironlake_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask) +static bool +gen5_ring_get_irq(struct intel_ring_buffer *ring) { - dev_priv->gt_irq_mask &= ~mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!dev->irq_enabled) + return false; + + mtx_assert(&dev_priv->irq_lock, MA_OWNED); + if (ring->irq_refcount++ == 0) { + dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); + } + + return true; } static void -ironlake_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask) +gen5_ring_put_irq(struct intel_ring_buffer *ring) { - dev_priv->gt_irq_mask |= mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + mtx_assert(&dev_priv->irq_lock, MA_OWNED); + if (--ring->irq_refcount == 0) { + dev_priv->gt_irq_mask |= ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); + } } -static void -i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask) +static bool +i9xx_ring_get_irq(struct intel_ring_buffer *ring) { - dev_priv->irq_mask &= ~mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!dev->irq_enabled) + return false; + + mtx_assert(&dev_priv->irq_lock, MA_OWNED); + if (ring->irq_refcount++ == 0) { + dev_priv->irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ(IMR); + } + + return true; } static void -i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask) +i9xx_ring_put_irq(struct intel_ring_buffer *ring) { - dev_priv->irq_mask |= mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + mtx_assert(&dev_priv->irq_lock, MA_OWNED); + if (--ring->irq_refcount == 0) { + dev_priv->irq_mask |= ring->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ(IMR); + } } static bool -render_ring_get_irq(struct intel_ring_buffer *ring) +i8xx_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; if (!dev->irq_enabled) return false; - mtx_assert(&ring->irq_lock, MA_OWNED); + mtx_assert(&dev_priv->irq_lock, MA_OWNED); if (ring->irq_refcount++ == 0) { - if (HAS_PCH_SPLIT(dev)) - ironlake_enable_irq(dev_priv, - GT_PIPE_NOTIFY | GT_USER_INTERRUPT); - else - i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + dev_priv->irq_mask &= ~ring->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); + POSTING_READ16(IMR); } return true; } static void -render_ring_put_irq(struct intel_ring_buffer *ring) +i8xx_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - 
mtx_assert(&ring->irq_lock, MA_OWNED); + mtx_assert(&dev_priv->irq_lock, MA_OWNED); if (--ring->irq_refcount == 0) { - if (HAS_PCH_SPLIT(dev)) - ironlake_disable_irq(dev_priv, - GT_USER_INTERRUPT | - GT_PIPE_NOTIFY); - else - i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + dev_priv->irq_mask |= ring->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); + POSTING_READ16(IMR); } } void intel_ring_setup_status_page(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; uint32_t mmio = 0; /* The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. */ if (IS_GEN7(dev)) { switch (ring->id) { case RCS: mmio = RENDER_HWS_PGA_GEN7; break; case BCS: mmio = BLT_HWS_PGA_GEN7; break; case VCS: mmio = BSD_HWS_PGA_GEN7; break; } } else if (IS_GEN6(dev)) { mmio = RING_HWS_PGA_GEN6(ring->mmio_base); } else { mmio = RING_HWS_PGA(ring->mmio_base); } I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); POSTING_READ(mmio); } static int bsd_ring_flush(struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains) { int ret; ret = intel_ring_begin(ring, 2); if (ret) return ret; intel_ring_emit(ring, MI_FLUSH); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } static int -ring_add_request(struct intel_ring_buffer *ring, - uint32_t *result) +i9xx_add_request(struct intel_ring_buffer *ring, + u32 *result) { - uint32_t seqno; + u32 seqno; int ret; ret = intel_ring_begin(ring, 4); if (ret) return ret; seqno = i915_gem_next_request_seqno(ring); intel_ring_emit(ring, MI_STORE_DWORD_INDEX); intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); intel_ring_emit(ring, seqno); intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_advance(ring); *result = seqno; return 0; } static bool -gen6_ring_get_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag) +gen6_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; if (!dev->irq_enabled) return false; gen6_gt_force_wake_get(dev_priv); - mtx_assert(&ring->irq_lock, MA_OWNED); + mtx_assert(&dev_priv->irq_lock, MA_OWNED); if (ring->irq_refcount++ == 0) { - ring->irq_mask &= ~rflag; - I915_WRITE_IMR(ring, ring->irq_mask); - ironlake_enable_irq(dev_priv, gflag); + I915_WRITE_IMR(ring, ~ring->irq_enable_mask); + dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); } return true; } static void -gen6_ring_put_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag) +gen6_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - mtx_assert(&ring->irq_lock, MA_OWNED); + mtx_assert(&dev_priv->irq_lock, MA_OWNED); if (--ring->irq_refcount == 0) { - ring->irq_mask |= rflag; - I915_WRITE_IMR(ring, ring->irq_mask); - ironlake_disable_irq(dev_priv, gflag); + I915_WRITE_IMR(ring, ~0); + dev_priv->gt_irq_mask |= ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); } gen6_gt_force_wake_put(dev_priv); } -static bool -bsd_ring_get_irq(struct intel_ring_buffer *ring) +static int +i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) { - struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + int ret; - if (!dev->irq_enabled) - return false; + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; 
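The gen5/i9xx/i8xx/gen6 irq_get/irq_put routines introduced in this hunk share one pattern: a per-ring reference count, guarded by the device-wide irq_lock, where the first get unmasks the ring's interrupt bit and the last put masks it again. The sketch below is a user-space model of that pattern under stated assumptions; fake_irq_state, fake_irq_get/put and fake_mask_write are made-up names, and unlike the driver (which asserts that the caller already holds dev_priv->irq_lock) the model takes its own mutex.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_irq_state {
    pthread_mutex_t lock;
    unsigned int refcount;
    uint32_t mask;            /* set bits = interrupt source masked off */
};

static void fake_mask_write(uint32_t mask)
{
    /* Stand-in for I915_WRITE(GTIMR/IMR, mask) followed by a posting read. */
    printf("mask register <- %08x\n", (unsigned)mask);
}

static bool fake_irq_get(struct fake_irq_state *st, uint32_t enable_bit)
{
    pthread_mutex_lock(&st->lock);
    if (st->refcount++ == 0) {
        st->mask &= ~enable_bit;       /* first reference: unmask */
        fake_mask_write(st->mask);
    }
    pthread_mutex_unlock(&st->lock);
    return true;
}

static void fake_irq_put(struct fake_irq_state *st, uint32_t enable_bit)
{
    pthread_mutex_lock(&st->lock);
    if (--st->refcount == 0) {
        st->mask |= enable_bit;        /* last reference: mask again */
        fake_mask_write(st->mask);
    }
    pthread_mutex_unlock(&st->lock);
}

int main(void)
{
    static struct fake_irq_state st = {
        .lock = PTHREAD_MUTEX_INITIALIZER, .refcount = 0, .mask = 0xffffffffu
    };

    fake_irq_get(&st, 1u << 0);   /* first get unmasks */
    fake_irq_get(&st, 1u << 0);   /* nested get: no register write */
    fake_irq_put(&st, 1u << 0);
    fake_irq_put(&st, 1u << 0);   /* last put masks again */
    return 0;
}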
- mtx_assert(&ring->irq_lock, MA_OWNED); - if (ring->irq_refcount++ == 0) { - if (IS_G4X(dev)) - i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT); - else - ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT); - } + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | + MI_BATCH_GTT | + MI_BATCH_NON_SECURE_I965); + intel_ring_emit(ring, offset); + intel_ring_advance(ring); - return true; + return 0; } -static void -bsd_ring_put_irq(struct intel_ring_buffer *ring) -{ - struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - mtx_assert(&ring->irq_lock, MA_OWNED); - if (--ring->irq_refcount == 0) { - if (IS_G4X(dev)) - i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT); - else - ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT); - } -} - static int -ring_dispatch_execbuffer(struct intel_ring_buffer *ring, uint32_t offset, - uint32_t length) +i830_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len) { int ret; - ret = intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 4); if (ret) return ret; - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | (2 << 6) | - MI_BATCH_NON_SECURE_I965); - intel_ring_emit(ring, offset); + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, 0); intel_ring_advance(ring); return 0; } static int -render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - uint32_t offset, uint32_t len) +i915_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len) { - struct drm_device *dev = ring->dev; int ret; - if (IS_I830(dev) || IS_845G(dev)) { - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); - intel_ring_emit(ring, offset + len - 8); - intel_ring_emit(ring, 0); - } else { - ret = intel_ring_begin(ring, 2); - if (ret) - return ret; - - if (INTEL_INFO(dev)->gen >= 4) { - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | (2 << 6) | - MI_BATCH_NON_SECURE_I965); - intel_ring_emit(ring, offset); - } else { - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | (2 << 6)); - intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); - } - } + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); intel_ring_advance(ring); return 0; } static void cleanup_status_page(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; struct drm_i915_gem_object *obj; obj = ring->status_page.obj; if (obj == NULL) return; pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1); kva_free((vm_offset_t)ring->status_page.page_addr, PAGE_SIZE); i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); ring->status_page.obj = NULL; - - memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); } static int init_status_page(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; int ret; obj = i915_gem_alloc_object(dev, 4096); if (obj == NULL) { DRM_ERROR("Failed to allocate status page\n"); ret = -ENOMEM; goto err; } i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); ret = i915_gem_object_pin(obj, 4096, true); if (ret != 0) { goto err_unref; } ring->status_page.gfx_addr = obj->gtt_offset; ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE); if 
(ring->status_page.page_addr == NULL) { - memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); goto err_unpin; } pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0], 1); pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr, (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE); ring->status_page.obj = obj; memset(ring->status_page.page_addr, 0, PAGE_SIZE); intel_ring_setup_status_page(ring); DRM_DEBUG("i915: init_status_page %s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr); return 0; err_unpin: i915_gem_object_unpin(obj); err_unref: drm_gem_object_unreference(&obj->base); err: return ret; } -static -int intel_init_ring_buffer(struct drm_device *dev, +static int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { struct drm_i915_gem_object *obj; int ret; ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); INIT_LIST_HEAD(&ring->gpu_write_list); + ring->size = 32 * PAGE_SIZE; - mtx_init(&ring->irq_lock, "ringb", NULL, MTX_DEF); - ring->irq_mask = ~0; - if (I915_NEED_GFX_HWS(dev)) { ret = init_status_page(ring); if (ret) return ret; } obj = i915_gem_alloc_object(dev, ring->size); if (obj == NULL) { DRM_ERROR("Failed to allocate ringbuffer\n"); ret = -ENOMEM; goto err_hws; } ring->obj = obj; ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) goto err_unref; - ring->map.size = ring->size; - ring->map.offset = dev->agp->base + obj->gtt_offset; - ring->map.type = 0; - ring->map.flags = 0; - ring->map.mtrr = 0; - - drm_core_ioremap_wc(&ring->map, dev); - if (ring->map.virtual == NULL) { + ring->virtual_start = pmap_mapdev_attr( + dev->agp->base + obj->gtt_offset, ring->size, + VM_MEMATTR_WRITE_COMBINING); + if (ring->virtual_start == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); ret = -EINVAL; goto err_unpin; } - ring->virtual_start = ring->map.virtual; ret = ring->init(ring); if (ret) goto err_unmap; /* Workaround an erratum on the i830 which causes a hang if * the TAIL pointer points to within the last 2 cachelines * of the buffer. */ ring->effective_size = ring->size; if (IS_I830(ring->dev) || IS_845G(ring->dev)) ring->effective_size -= 128; return 0; err_unmap: - drm_core_ioremapfree(&ring->map, dev); + pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size); err_unpin: i915_gem_object_unpin(obj); err_unref: drm_gem_object_unreference(&obj->base); ring->obj = NULL; err_hws: cleanup_status_page(ring); return ret; } void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv; int ret; if (ring->obj == NULL) return; /* Disable the ring buffer. 
The ring must be idle at this point */ dev_priv = ring->dev->dev_private; ret = intel_wait_ring_idle(ring); I915_WRITE_CTL(ring, 0); - drm_core_ioremapfree(&ring->map, ring->dev); + pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size); i915_gem_object_unpin(ring->obj); drm_gem_object_unreference(&ring->obj->base); ring->obj = NULL; if (ring->cleanup) ring->cleanup(ring); cleanup_status_page(ring); } static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) { - unsigned int *virt; + uint32_t *virt; int rem = ring->size - ring->tail; if (ring->space < rem) { int ret = intel_wait_ring_buffer(ring, rem); if (ret) return ret; } - virt = (unsigned int *)((char *)ring->virtual_start + ring->tail); - rem /= 8; - while (rem--) { + virt = (uint32_t *)((char *)ring->virtual_start + ring->tail); + rem /= 4; + while (rem--) *virt++ = MI_NOOP; - *virt++ = MI_NOOP; - } ring->tail = 0; ring->space = ring_space(ring); return 0; } static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; bool was_interruptible; int ret; /* XXX As we have not yet audited all the paths to check that * they are ready for ERESTARTSYS from intel_ring_begin, do not * allow us to be interruptible by a signal. */ was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = false; - ret = i915_wait_request(ring, seqno, true); + ret = i915_wait_request(ring, seqno); dev_priv->mm.interruptible = was_interruptible; + if (!ret) + i915_gem_retire_requests_ring(ring); return ret; } static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) { struct drm_i915_gem_request *request; u32 seqno = 0; int ret; i915_gem_retire_requests_ring(ring); if (ring->last_retired_head != -1) { ring->head = ring->last_retired_head; ring->last_retired_head = -1; ring->space = ring_space(ring); if (ring->space >= n) return 0; } list_for_each_entry(request, &ring->request_list, list) { int space; if (request->tail == -1) continue; space = request->tail - (ring->tail + 8); if (space < 0) space += ring->size; if (space >= n) { seqno = request->seqno; break; } /* Consume this request in case we need more space than * is available and so need to prevent a race between * updating last_retired_head and direct reads of * I915_RING_HEAD. It also provides a nice sanity check. */ request->tail = -1; } if (seqno == 0) return -ENOSPC; ret = intel_ring_wait_seqno(ring, seqno); if (ret) return ret; if (ring->last_retired_head == -1) return -ENOSPC; ring->head = ring->last_retired_head; ring->last_retired_head = -1; ring->space = ring_space(ring); if (ring->space < n) return -ENOSPC; return 0; } int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; int end; int ret; ret = intel_ring_wait_request(ring, n); if (ret != -ENOSPC) return ret; CTR1(KTR_DRM, "ring_wait_begin %s", ring->name); - if (drm_core_check_feature(dev, DRIVER_GEM)) - /* With GEM the hangcheck timer should kick us out of the loop, - * leaving it early runs the risk of corrupting GEM state (due - * to running on almost untested codepaths). But on resume - * timers don't work yet, so prevent a complete hang in that - * case by choosing an insanely large timeout. 
*/ - end = ticks + hz * 60; - else - end = ticks + hz * 3; + /* With GEM the hangcheck timer should kick us out of the loop, + * leaving it early runs the risk of corrupting GEM state (due + * to running on almost untested codepaths). But on resume + * timers don't work yet, so prevent a complete hang in that + * case by choosing an insanely large timeout. */ + end = ticks + hz * 60; + do { ring->head = I915_READ_HEAD(ring); ring->space = ring_space(ring); if (ring->space >= n) { CTR1(KTR_DRM, "ring_wait_end %s", ring->name); return 0; } #if 0 if (dev->primary->master) { struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; } #else if (dev_priv->sarea_priv) dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; #endif pause("915rng", 1); if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) { CTR1(KTR_DRM, "ring_wait_end %s wedged", ring->name); return -EAGAIN; } } while (!time_after(ticks, end)); CTR1(KTR_DRM, "ring_wait_end %s busy", ring->name); return -EBUSY; } int intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords) { struct drm_i915_private *dev_priv = ring->dev->dev_private; int n = 4*num_dwords; int ret; if (atomic_load_acq_int(&dev_priv->mm.wedged)) return -EIO; if (ring->tail + n > ring->effective_size) { ret = intel_wrap_ring_buffer(ring); if (ret != 0) return ret; } if (ring->space < n) { ret = intel_wait_ring_buffer(ring, n); if (ret != 0) return ret; } ring->space -= n; return 0; } void intel_ring_advance(struct intel_ring_buffer *ring) { + struct drm_i915_private *dev_priv = ring->dev->dev_private; + ring->tail &= ring->size - 1; + if (dev_priv->stop_rings & intel_ring_flag(ring)) + return; ring->write_tail(ring, ring->tail); } -static const struct intel_ring_buffer render_ring = { - .name = "render ring", - .id = RCS, - .mmio_base = RENDER_RING_BASE, - .size = 32 * PAGE_SIZE, - .init = init_render_ring, - .write_tail = ring_write_tail, - .flush = render_ring_flush, - .add_request = render_ring_add_request, - .get_seqno = ring_get_seqno, - .irq_get = render_ring_get_irq, - .irq_put = render_ring_put_irq, - .dispatch_execbuffer = render_ring_dispatch_execbuffer, - .cleanup = render_ring_cleanup, - .sync_to = render_ring_sync_to, - .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID, - MI_SEMAPHORE_SYNC_RV, - MI_SEMAPHORE_SYNC_RB}, - .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC}, -}; -/* ring buffer for bit-stream decoder */ - -static const struct intel_ring_buffer bsd_ring = { - .name = "bsd ring", - .id = VCS, - .mmio_base = BSD_RING_BASE, - .size = 32 * PAGE_SIZE, - .init = init_ring_common, - .write_tail = ring_write_tail, - .flush = bsd_ring_flush, - .add_request = ring_add_request, - .get_seqno = ring_get_seqno, - .irq_get = bsd_ring_get_irq, - .irq_put = bsd_ring_put_irq, - .dispatch_execbuffer = ring_dispatch_execbuffer, -}; - - static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, - uint32_t value) + u32 value) { drm_i915_private_t *dev_priv = ring->dev->dev_private; /* Every tail move must follow the sequence below */ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); I915_WRITE(GEN6_BSD_RNCID, 0x0); if (_intel_wait_for(ring->dev, (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50, true, "915g6i") != 0) DRM_ERROR("timed out waiting for IDLE Indicator\n"); I915_WRITE_TAIL(ring, value); 
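intel_wait_ring_buffer() above polls the hardware head in a loop with an explicit deadline (end = ticks + hz * 60) and gives up with -EBUSY once the deadline passes. A rough user-space analogue of that poll-until-deadline idiom, with clock_gettime()/nanosleep() standing in for the kernel's ticks counter and pause(), might look like the following; wait_for_condition() and third_try() are illustrative names only.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool deadline_passed(const struct timespec *end)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return now.tv_sec > end->tv_sec ||
        (now.tv_sec == end->tv_sec && now.tv_nsec >= end->tv_nsec);
}

/* Poll check() roughly once per millisecond until it succeeds or we time out. */
static int wait_for_condition(bool (*check)(void *), void *arg, int timeout_sec)
{
    struct timespec end, tick = { 0, 1000 * 1000 };

    clock_gettime(CLOCK_MONOTONIC, &end);
    end.tv_sec += timeout_sec;

    do {
        if (check(arg))
            return 0;
        nanosleep(&tick, NULL);
    } while (!deadline_passed(&end));

    return -1;      /* the driver returns -EBUSY here */
}

static bool third_try(void *arg)
{
    int *calls = arg;

    return ++(*calls) >= 3;   /* pretend the condition becomes true on the third poll */
}

int main(void)
{
    int calls = 0;

    printf("wait_for_condition -> %d after %d polls\n",
        wait_for_condition(third_try, &calls, 1), calls);
    return 0;
}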
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); } static int gen6_ring_flush(struct intel_ring_buffer *ring, uint32_t invalidate, uint32_t flush) { uint32_t cmd; int ret; ret = intel_ring_begin(ring, 4); if (ret) return ret; cmd = MI_FLUSH_DW; if (invalidate & I915_GEM_GPU_DOMAINS) cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; intel_ring_emit(ring, cmd); intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } static int gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, uint32_t offset, uint32_t len) { int ret; ret = intel_ring_begin(ring, 2); if (ret) return ret; intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ intel_ring_emit(ring, offset); intel_ring_advance(ring); return 0; } -static bool -gen6_render_ring_get_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_get_irq(ring, - GT_USER_INTERRUPT, - GEN6_RENDER_USER_INTERRUPT); -} - -static void -gen6_render_ring_put_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_put_irq(ring, - GT_USER_INTERRUPT, - GEN6_RENDER_USER_INTERRUPT); -} - -static bool -gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_get_irq(ring, - GT_GEN6_BSD_USER_INTERRUPT, - GEN6_BSD_USER_INTERRUPT); -} - -static void -gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_put_irq(ring, - GT_GEN6_BSD_USER_INTERRUPT, - GEN6_BSD_USER_INTERRUPT); -} - -/* ring buffer for Video Codec for Gen6+ */ -static const struct intel_ring_buffer gen6_bsd_ring = { - .name = "gen6 bsd ring", - .id = VCS, - .mmio_base = GEN6_BSD_RING_BASE, - .size = 32 * PAGE_SIZE, - .init = init_ring_common, - .write_tail = gen6_bsd_ring_write_tail, - .flush = gen6_ring_flush, - .add_request = gen6_add_request, - .get_seqno = gen6_ring_get_seqno, - .irq_get = gen6_bsd_ring_get_irq, - .irq_put = gen6_bsd_ring_put_irq, - .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, - .sync_to = gen6_bsd_ring_sync_to, - .semaphore_register = {MI_SEMAPHORE_SYNC_VR, - MI_SEMAPHORE_SYNC_INVALID, - MI_SEMAPHORE_SYNC_VB}, - .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC}, -}; - /* Blitter support (SandyBridge+) */ -static bool -blt_ring_get_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_get_irq(ring, - GT_BLT_USER_INTERRUPT, - GEN6_BLITTER_USER_INTERRUPT); -} - -static void -blt_ring_put_irq(struct intel_ring_buffer *ring) -{ - gen6_ring_put_irq(ring, - GT_BLT_USER_INTERRUPT, - GEN6_BLITTER_USER_INTERRUPT); -} - static int blt_ring_flush(struct intel_ring_buffer *ring, - uint32_t invalidate, uint32_t flush) + u32 invalidate, u32 flush) { - uint32_t cmd; + u32 cmd; int ret; ret = intel_ring_begin(ring, 4); if (ret) return ret; cmd = MI_FLUSH_DW; if (invalidate & I915_GEM_DOMAIN_RENDER) cmd |= MI_INVALIDATE_TLB; intel_ring_emit(ring, cmd); intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } -static const struct intel_ring_buffer gen6_blt_ring = { - .name = "blt ring", - .id = BCS, - .mmio_base = BLT_RING_BASE, - .size = 32 * PAGE_SIZE, - .init = init_ring_common, - .write_tail = ring_write_tail, - .flush = blt_ring_flush, - .add_request = gen6_add_request, - .get_seqno = gen6_ring_get_seqno, - .irq_get = blt_ring_get_irq, - .irq_put = blt_ring_put_irq, - .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, - .sync_to = gen6_blt_ring_sync_to, - 
.semaphore_register = {MI_SEMAPHORE_SYNC_BR, - MI_SEMAPHORE_SYNC_BV, - MI_SEMAPHORE_SYNC_INVALID}, - .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC}, -}; - int intel_init_render_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; - *ring = render_ring; + ring->name = "render ring"; + ring->id = RCS; + ring->mmio_base = RENDER_RING_BASE; + if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; ring->flush = gen6_render_ring_flush; - ring->irq_get = gen6_render_ring_get_irq; - ring->irq_put = gen6_render_ring_put_irq; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->irq_enable_mask = GT_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; + ring->sync_to = gen6_ring_sync; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; + ring->signal_mbox[0] = GEN6_VRSYNC; + ring->signal_mbox[1] = GEN6_BRSYNC; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; + ring->flush = gen4_render_ring_flush; ring->get_seqno = pc_render_get_seqno; + ring->irq_get = gen5_ring_get_irq; + ring->irq_put = gen5_ring_put_irq; + ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; + } else { + ring->add_request = i9xx_add_request; + if (INTEL_INFO(dev)->gen < 4) + ring->flush = gen2_render_ring_flush; + else + ring->flush = gen4_render_ring_flush; + ring->get_seqno = ring_get_seqno; + if (IS_GEN2(dev)) { + ring->irq_get = i8xx_ring_get_irq; + ring->irq_put = i8xx_ring_put_irq; + } else { + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + } + ring->irq_enable_mask = I915_USER_INTERRUPT; } + ring->write_tail = ring_write_tail; + if (INTEL_INFO(dev)->gen >= 6) + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + else if (INTEL_INFO(dev)->gen >= 4) + ring->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev) || IS_845G(dev)) + ring->dispatch_execbuffer = i830_dispatch_execbuffer; + else + ring->dispatch_execbuffer = i915_dispatch_execbuffer; + ring->init = init_render_ring; + ring->cleanup = render_ring_cleanup; + if (!I915_NEED_GFX_HWS(dev)) { ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; memset(ring->status_page.page_addr, 0, PAGE_SIZE); } return intel_init_ring_buffer(dev, ring); } -int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start, - uint32_t size) +int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; - *ring = render_ring; + ring->name = "render ring"; + ring->id = RCS; + ring->mmio_base = RENDER_RING_BASE; + if (INTEL_INFO(dev)->gen >= 6) { - ring->add_request = gen6_add_request; - ring->irq_get = gen6_render_ring_get_irq; - ring->irq_put = gen6_render_ring_put_irq; - } else if (IS_GEN5(dev)) { - ring->add_request = pc_render_add_request; - ring->get_seqno = pc_render_get_seqno; + /* non-kms not supported on gen6+ */ + return -ENODEV; } + /* Note: gem is not supported on gen5/ilk without kms (the corresponding + * gem_init ioctl returns with -ENODEV). Hence we do not need to set up + * the special gen5 functions. 
*/ + ring->add_request = i9xx_add_request; + if (INTEL_INFO(dev)->gen < 4) + ring->flush = gen2_render_ring_flush; + else + ring->flush = gen4_render_ring_flush; + ring->get_seqno = ring_get_seqno; + if (IS_GEN2(dev)) { + ring->irq_get = i8xx_ring_get_irq; + ring->irq_put = i8xx_ring_put_irq; + } else { + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + } + ring->irq_enable_mask = I915_USER_INTERRUPT; + ring->write_tail = ring_write_tail; + if (INTEL_INFO(dev)->gen >= 4) + ring->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev) || IS_845G(dev)) + ring->dispatch_execbuffer = i830_dispatch_execbuffer; + else + ring->dispatch_execbuffer = i915_dispatch_execbuffer; + ring->init = init_render_ring; + ring->cleanup = render_ring_cleanup; + ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); INIT_LIST_HEAD(&ring->gpu_write_list); ring->size = size; ring->effective_size = ring->size; if (IS_I830(ring->dev)) ring->effective_size -= 128; - ring->map.offset = start; - ring->map.size = size; - ring->map.type = 0; - ring->map.flags = 0; - ring->map.mtrr = 0; - - drm_core_ioremap_wc(&ring->map, dev); - if (ring->map.virtual == NULL) { + ring->virtual_start = pmap_mapdev_attr(start, size, + VM_MEMATTR_WRITE_COMBINING); + if (ring->virtual_start == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); return -ENOMEM; } - ring->virtual_start = (void *)ring->map.virtual; return 0; } int intel_init_bsd_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->rings[VCS]; - if (IS_GEN6(dev) || IS_GEN7(dev)) - *ring = gen6_bsd_ring; - else - *ring = bsd_ring; + ring->name = "bsd ring"; + ring->id = VCS; + ring->write_tail = ring_write_tail; + if (IS_GEN6(dev) || IS_GEN7(dev)) { + ring->mmio_base = GEN6_BSD_RING_BASE; + /* gen6 bsd needs a special wa for tail updates */ + if (IS_GEN6(dev)) + ring->write_tail = gen6_bsd_ring_write_tail; + ring->flush = gen6_ring_flush; + ring->add_request = gen6_add_request; + ring->get_seqno = gen6_ring_get_seqno; + ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + ring->sync_to = gen6_ring_sync; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; + ring->signal_mbox[0] = GEN6_RVSYNC; + ring->signal_mbox[1] = GEN6_BVSYNC; + } else { + ring->mmio_base = BSD_RING_BASE; + ring->flush = bsd_ring_flush; + ring->add_request = i9xx_add_request; + ring->get_seqno = ring_get_seqno; + if (IS_GEN5(dev)) { + ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; + ring->irq_get = gen5_ring_get_irq; + ring->irq_put = gen5_ring_put_irq; + } else { + ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + } + ring->dispatch_execbuffer = i965_dispatch_execbuffer; + } + ring->init = init_ring_common; + + return intel_init_ring_buffer(dev, ring); } int intel_init_blt_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->rings[BCS]; - *ring = gen6_blt_ring; + ring->name = "blitter ring"; + ring->id = BCS; + + ring->mmio_base = BLT_RING_BASE; + ring->write_tail = ring_write_tail; + ring->flush = blt_ring_flush; + ring->add_request = 
gen6_add_request; + ring->get_seqno = gen6_ring_get_seqno; + ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + ring->sync_to = gen6_ring_sync; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; + ring->signal_mbox[0] = GEN6_RBSYNC; + ring->signal_mbox[1] = GEN6_VBSYNC; + ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); } Index: head/sys/dev/drm2/i915/intel_ringbuffer.h =================================================================== --- head/sys/dev/drm2/i915/intel_ringbuffer.h (revision 277486) +++ head/sys/dev/drm2/i915/intel_ringbuffer.h (revision 277487) @@ -1,216 +1,215 @@ /* * $FreeBSD$ */ #ifndef _INTEL_RINGBUFFER_H_ #define _INTEL_RINGBUFFER_H_ struct intel_hw_status_page { - uint32_t *page_addr; + u32 *page_addr; unsigned int gfx_addr; struct drm_i915_gem_object *obj; }; #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) #define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) #define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) struct intel_ring_buffer { const char *name; enum intel_ring_id { RCS = 0x0, VCS, BCS, } id; #define I915_NUM_RINGS 3 - uint32_t mmio_base; + u32 mmio_base; void *virtual_start; struct drm_device *dev; struct drm_i915_gem_object *obj; - uint32_t head; - uint32_t tail; + u32 head; + u32 tail; int space; int size; int effective_size; struct intel_hw_status_page status_page; /** We track the position of the requests in the ring buffer, and * when each is retired we increment last_retired_head as the GPU * must have finished processing the request and so we know we * can advance the ringbuffer up to that position. * * last_retired_head is set to -1 after the value is consumed so * we can detect new retirements. 
*/ u32 last_retired_head; - struct mtx irq_lock; - uint32_t irq_refcount; - uint32_t irq_mask; - uint32_t irq_seqno; /* last seq seem at irq time */ - uint32_t trace_irq_seqno; - uint32_t waiting_seqno; - uint32_t sync_seqno[I915_NUM_RINGS-1]; + u32 irq_refcount; + u32 irq_enable_mask; /* bitmask to enable ring interrupt */ + u32 trace_irq_seqno; + u32 sync_seqno[I915_NUM_RINGS-1]; bool (*irq_get)(struct intel_ring_buffer *ring); void (*irq_put)(struct intel_ring_buffer *ring); int (*init)(struct intel_ring_buffer *ring); void (*write_tail)(struct intel_ring_buffer *ring, uint32_t value); int (*flush)(struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains); int (*add_request)(struct intel_ring_buffer *ring, uint32_t *seqno); uint32_t (*get_seqno)(struct intel_ring_buffer *ring); int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, uint32_t offset, uint32_t length); void (*cleanup)(struct intel_ring_buffer *ring); int (*sync_to)(struct intel_ring_buffer *ring, struct intel_ring_buffer *to, u32 seqno); u32 semaphore_register[3]; /*our mbox written by others */ u32 signal_mbox[2]; /* mboxes this ring signals to */ /** * List of objects currently involved in rendering from the * ringbuffer. * * Includes buffers having the contents of their GPU caches * flushed, not necessarily primitives. last_rendering_seqno * represents when the rendering involved will be completed. * * A reference is held on the buffer while on this list. */ struct list_head active_list; /** * List of breadcrumbs associated with GPU requests currently * outstanding. */ struct list_head request_list; /** * List of objects currently pending a GPU write flush. * * All elements on this list will belong to either the * active_list or flushing_list, last_rendering_seqno can * be used to differentiate between the two elements. */ struct list_head gpu_write_list; /** * Do we have some not yet emitted requests outstanding? */ - uint32_t outstanding_lazy_request; + u32 outstanding_lazy_request; /** * Do an explicit TLB flush before MI_SET_CONTEXT */ bool itlb_before_ctx_switch; struct i915_hw_context *default_context; struct drm_i915_gem_object *last_context_obj; drm_local_map_t map; void *private; }; static inline bool intel_ring_initialized(struct intel_ring_buffer *ring) { return ring->obj != NULL; } static inline unsigned intel_ring_flag(struct intel_ring_buffer *ring) { return 1 << ring->id; } static inline uint32_t intel_ring_sync_index(struct intel_ring_buffer *ring, struct intel_ring_buffer *other) { int idx; /* * cs -> 0 = vcs, 1 = bcs * vcs -> 0 = bcs, 1 = cs, * bcs -> 0 = cs, 1 = vcs. */ idx = (other - ring) - 1; if (idx < 0) idx += I915_NUM_RINGS; return idx; } static inline uint32_t intel_read_status_page(struct intel_ring_buffer *ring, int reg) { + /* Ensure that the compiler doesn't optimize away the load. 
*/ + __compiler_membar(); return (atomic_load_acq_32(ring->status_page.page_addr + reg)); } void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring) { return (intel_wait_ring_buffer(ring, ring->size - 8)); } int intel_ring_begin(struct intel_ring_buffer *ring, int n); static inline void intel_ring_emit(struct intel_ring_buffer *ring, uint32_t data) { *(volatile uint32_t *)((char *)ring->virtual_start + ring->tail) = data; ring->tail += 4; } void intel_ring_advance(struct intel_ring_buffer *ring); uint32_t intel_ring_get_seqno(struct intel_ring_buffer *ring); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); int intel_init_blt_ring_buffer(struct drm_device *dev); u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); void intel_ring_setup_status_page(struct intel_ring_buffer *ring); static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) { return ring->tail; } void i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno); /* DRI warts */ int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start, uint32_t size); #endif /* _INTEL_RINGBUFFER_H_ */ Index: head/sys/dev/drm2/i915/intel_sdvo.c =================================================================== --- head/sys/dev/drm2/i915/intel_sdvo.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_sdvo.c (revision 277487) @@ -1,2680 +1,2690 @@ /* * Copyright 2006 Dave Airlie * Copyright © 2006-2007 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) #define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) -#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) +#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0) #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ SDVO_TV_MASK) #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) #define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) static const char *tv_format_names[] = { "NTSC_M" , "NTSC_J" , "NTSC_443", "PAL_B" , "PAL_D" , "PAL_G" , "PAL_H" , "PAL_I" , "PAL_M" , "PAL_N" , "PAL_NC" , "PAL_60" , "SECAM_B" , "SECAM_D" , "SECAM_G" , "SECAM_K" , "SECAM_K1", "SECAM_L" , "SECAM_60" }; #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) struct intel_sdvo { struct intel_encoder base; device_t i2c; u8 slave_addr; device_t ddc_iic_bus, ddc; /* Register for the SDVO device: SDVOB or SDVOC */ - int sdvo_reg; + uint32_t sdvo_reg; /* Active outputs controlled by this SDVO output */ uint16_t controlled_output; /* * Capabilities of the SDVO device returned by * i830_sdvo_get_capabilities() */ struct intel_sdvo_caps caps; /* Pixel clock limitations reported by the SDVO device, in kHz */ int pixel_clock_min, pixel_clock_max; /* * For multiple function SDVO device, * this is for current attached outputs. */ uint16_t attached_output; /* * Hotplug activation bits for this device */ uint8_t hotplug_active[2]; /** * This is used to select the color range of RBG outputs in HDMI mode. * It is only valid when using TMDS encoding and 8 bit per color mode. */ uint32_t color_range; /** * This is set if we're going to treat the device as TV-out. * * While we have these nice friendly flags for output types that ought * to decide this for us, the S-Video output on our HDMI+S-Video card * shows up as RGB1 (VGA). */ bool is_tv; + /* On different gens SDVOB is at different places. */ + bool is_sdvob; + /* This is for current tv format name */ int tv_format_index; /** * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; bool has_hdmi_monitor; bool has_hdmi_audio; /** * This is set if we detect output of sdvo device as LVDS and * have a valid fixed mode to use with the panel. 
*/ bool is_lvds; /** * This is sdvo fixed pannel mode pointer */ struct drm_display_mode *sdvo_lvds_fixed_mode; /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; /* Input timings for adjusted_mode */ struct intel_sdvo_dtd input_dtd; }; struct intel_sdvo_connector { struct intel_connector base; /* Mark the type of connector */ uint16_t output_flag; enum hdmi_force_audio force_audio; /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; struct drm_property *tv_format; /* add the property for the SDVO-TV */ struct drm_property *left; struct drm_property *right; struct drm_property *top; struct drm_property *bottom; struct drm_property *hpos; struct drm_property *vpos; struct drm_property *contrast; struct drm_property *saturation; struct drm_property *hue; struct drm_property *sharpness; struct drm_property *flicker_filter; struct drm_property *flicker_filter_adaptive; struct drm_property *flicker_filter_2d; struct drm_property *tv_chroma_filter; struct drm_property *tv_luma_filter; struct drm_property *dot_crawl; /* add the property for the SDVO-TV/LVDS */ struct drm_property *brightness; /* Add variable to record current setting for the above property */ u32 left_margin, right_margin, top_margin, bottom_margin; /* this is to get the range of margin.*/ u32 max_hscan, max_vscan; u32 max_hpos, cur_hpos; u32 max_vpos, cur_vpos; u32 cur_brightness, max_brightness; u32 cur_contrast, max_contrast; u32 cur_saturation, max_saturation; u32 cur_hue, max_hue; u32 cur_sharpness, max_sharpness; u32 cur_flicker_filter, max_flicker_filter; u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive; u32 cur_flicker_filter_2d, max_flicker_filter_2d; u32 cur_tv_chroma_filter, max_tv_chroma_filter; u32 cur_tv_luma_filter, max_tv_luma_filter; u32 cur_dot_crawl, max_dot_crawl; }; static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) { return container_of(encoder, struct intel_sdvo, base.base); } static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_sdvo, base); } static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) { return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type); static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector); /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). */ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 bval = val, cval = val; int i; if (intel_sdvo->sdvo_reg == PCH_SDVOB) { I915_WRITE(intel_sdvo->sdvo_reg, val); I915_READ(intel_sdvo->sdvo_reg); return; } if (intel_sdvo->sdvo_reg == SDVOB) { cval = I915_READ(SDVOC); } else { bval = I915_READ(SDVOB); } /* * Write the registers twice for luck. Sometimes, * writing them only once doesn't appear to 'stick'. * The BIOS does this too. 
Yay, magic */ for (i = 0; i < 2; i++) { I915_WRITE(SDVOB, bval); I915_READ(SDVOB); I915_WRITE(SDVOC, cval); I915_READ(SDVOC); } } static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { struct iic_msg msgs[] = { { .slave = intel_sdvo->slave_addr << 1, .flags = 0, .len = 1, .buf = &addr, }, { .slave = intel_sdvo->slave_addr << 1, .flags = IIC_M_RD, .len = 1, .buf = ch, } }; int ret; if ((ret = iicbus_transfer(intel_sdvo->i2c, msgs, 2)) == 0) return true; DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); return false; } #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { u8 cmd; const char *name; } sdvo_cmd_names[] = { SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), /* Add the op code for SDVO enhancements */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), /* HDMI op code */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), }; -#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) -#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC") +#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? 
"SDVOB" : "SDVOC") -static void -intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, - const void *args, int args_len) +static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, + const void *args, int args_len) { int i; if ((drm_debug_flag & DRM_DEBUGBITS_KMS) == 0) return; DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(intel_sdvo), cmd); for (i = 0; i < args_len; i++) printf("%02X ", ((const u8 *)args)[i]); for (; i < 8; i++) printf(" "); for (i = 0; i < DRM_ARRAY_SIZE(sdvo_cmd_names); i++) { if (cmd == sdvo_cmd_names[i].cmd) { printf("(%s)", sdvo_cmd_names[i].name); break; } } if (i == DRM_ARRAY_SIZE(sdvo_cmd_names)) printf("(%02X)", cmd); printf("\n"); } static const char *cmd_status_names[] = { "Power on", "Success", "Not supported", "Invalid arg", "Pending", "Target not specified", "Scaling not supported" }; static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { u8 buf[args_len*2 + 2], status; struct iic_msg msgs[args_len + 3]; int i, ret; intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); for (i = 0; i < args_len; i++) { msgs[i].slave = intel_sdvo->slave_addr << 1; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2 *i; buf[2*i + 0] = SDVO_I2C_ARG_0 - i; buf[2*i + 1] = ((const u8*)args)[i]; } msgs[i].slave = intel_sdvo->slave_addr << 1; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2*i; buf[2*i + 0] = SDVO_I2C_OPCODE; buf[2*i + 1] = cmd; /* the following two are to read the response */ status = SDVO_I2C_CMD_STATUS; msgs[i+1].slave = intel_sdvo->slave_addr << 1; msgs[i+1].flags = 0; msgs[i+1].len = 1; msgs[i+1].buf = &status; msgs[i+2].slave = intel_sdvo->slave_addr << 1; msgs[i+2].flags = IIC_M_RD; msgs[i+2].len = 1; msgs[i+2].buf = &status; ret = iicbus_transfer(intel_sdvo->i2c, msgs, i+3); if (ret != 0) { DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); return (false); } #if 0 if (ret != i+3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); return false; } #endif return true; } static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { u8 retry = 5; u8 status; int i; DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); /* * The documentation states that all commands will be * processed within 15µs, and that we need only poll * the status byte a maximum of 3 times in order for the * command to be complete. * * Check 5 times in case the hardware failed to read the docs. */ if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) goto log_fail; while (status == SDVO_CMD_STATUS_PENDING && retry--) { DELAY(15); if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) goto log_fail; } if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) { if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) printf("(%s)", cmd_status_names[status]); else printf("(??? %d)", status); } if (status != SDVO_CMD_STATUS_SUCCESS) goto log_fail; /* Read the command response */ for (i = 0; i < response_len; i++) { if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_RETURN_0 + i, &((u8 *)response)[i])) goto log_fail; if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) printf(" %02X", ((u8 *)response)[i]); } if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) printf("\n"); return (true); log_fail: if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) printf("... 
failed\n"); return (false); } static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) { if (mode->clock >= 100000) return 1; else if (mode->clock >= 50000) return 2; else return 4; } static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, u8 ddc_bus) { /* This must be the immediately preceding write before the i2c xfer */ return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &ddc_bus, 1); } static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) { if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) return false; return intel_sdvo_read_response(intel_sdvo, NULL, 0); } static bool intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len) { if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0)) return false; return intel_sdvo_read_response(intel_sdvo, value, len); } static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_set_target_input_args targets = {0}; return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TARGET_INPUT, &targets, sizeof(targets)); } /** * Return whether each input is trained. * * This function is making an assumption about the layout of the response, * which should be checked against the docs. */ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2) { struct intel_sdvo_get_trained_inputs_response response; CTASSERT(sizeof(response) == 1); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, &response, sizeof(response))) return false; *input_1 = response.input0_trained; *input_2 = response.input1_trained; return true; } static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo, u16 outputs) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, sizeof(outputs)); } static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, int mode) { u8 state = SDVO_ENCODER_STATE_ON; switch (mode) { case DRM_MODE_DPMS_ON: state = SDVO_ENCODER_STATE_ON; break; case DRM_MODE_DPMS_STANDBY: state = SDVO_ENCODER_STATE_STANDBY; break; case DRM_MODE_DPMS_SUSPEND: state = SDVO_ENCODER_STATE_SUSPEND; break; case DRM_MODE_DPMS_OFF: state = SDVO_ENCODER_STATE_OFF; break; } return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); } static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo, int *clock_min, int *clock_max) { struct intel_sdvo_pixel_clock_range clocks; CTASSERT(sizeof(clocks) == 4); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, &clocks, sizeof(clocks))) return false; /* Convert the values from units of 10 kHz to kHz. 
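To put the unit conversion just above in concrete terms: the SDVO device reports its pixel clock range in 10 kHz steps, so a purely illustrative response of min = 2500 and max = 16500 becomes pixel_clock_min = 25,000 kHz and pixel_clock_max = 165,000 kHz, i.e. a 25-165 MHz window that mode_valid later checks candidate modes against.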
*/ *clock_min = clocks.min * 10; *clock_max = clocks.max * 10; return true; } static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo, u16 outputs) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, sizeof(outputs)); } static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd, struct intel_sdvo_dtd *dtd) { return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) && intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2)); } static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { return intel_sdvo_set_timing(intel_sdvo, SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); } static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { return intel_sdvo_set_timing(intel_sdvo, SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); } static bool intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, uint16_t clock, uint16_t width, uint16_t height) { struct intel_sdvo_preferred_input_timing_args args; memset(&args, 0, sizeof(args)); args.clock = clock; args.width = width; args.height = height; args.interlace = 0; if (intel_sdvo->is_lvds && (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width || intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height)) args.scaled = 1; return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, &args, sizeof(args)); } static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { CTASSERT(sizeof(dtd->part1) == 8); CTASSERT(sizeof(dtd->part2) == 8); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, &dtd->part1, sizeof(dtd->part1)) && intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, &dtd->part2, sizeof(dtd->part2)); } static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); } static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, const struct drm_display_mode *mode) { uint16_t width, height; uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; uint16_t h_sync_offset, v_sync_offset; int mode_clock; - width = mode->crtc_hdisplay; - height = mode->crtc_vdisplay; + width = mode->hdisplay; + height = mode->vdisplay; - /* do some mode translations */ - h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; - h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; + /* do some mode translations */ + h_blank_len = mode->htotal - mode->hdisplay; + h_sync_len = mode->hsync_end - mode->hsync_start; - v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; - v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; + v_blank_len = mode->vtotal - mode->vdisplay; + v_sync_len = mode->vsync_end - mode->vsync_start; - h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; - v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; + h_sync_offset = mode->hsync_start - mode->hdisplay; + v_sync_offset = mode->vsync_start - mode->vdisplay; mode_clock = mode->clock; mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1; mode_clock /= 10; dtd->part1.clock = mode_clock; dtd->part1.h_active = width & 0xff; dtd->part1.h_blank = h_blank_len & 0xff; dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | ((h_blank_len >> 8) & 0xf); dtd->part1.v_active = height & 0xff; dtd->part1.v_blank = v_blank_len & 0xff; dtd->part1.v_high = (((height 
>> 8) & 0xf) << 4) | ((v_blank_len >> 8) & 0xf); dtd->part2.h_sync_off = h_sync_offset & 0xff; dtd->part2.h_sync_width = h_sync_len & 0xff; dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | (v_sync_len & 0xf); dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4); dtd->part2.dtd_flags = 0x18; if (mode->flags & DRM_MODE_FLAG_PHSYNC) dtd->part2.dtd_flags |= 0x2; if (mode->flags & DRM_MODE_FLAG_PVSYNC) dtd->part2.dtd_flags |= 0x4; dtd->part2.sdvo_flags = 0; dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; dtd->part2.reserved = 0; } static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, const struct intel_sdvo_dtd *dtd) { mode->hdisplay = dtd->part1.h_active; mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; mode->htotal = mode->hdisplay + dtd->part1.h_blank; mode->htotal += (dtd->part1.h_high & 0xf) << 8; mode->vdisplay = dtd->part1.v_active; mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; mode->vsync_start = mode->vdisplay; mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; mode->vsync_end = mode->vsync_start + (dtd->part2.v_sync_off_width & 0xf); mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; mode->vtotal = mode->vdisplay + dtd->part1.v_blank; mode->vtotal += (dtd->part1.v_high & 0xf) << 8; mode->clock = dtd->part1.clock * 10; mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); if (dtd->part2.dtd_flags & 0x2) mode->flags |= DRM_MODE_FLAG_PHSYNC; if (dtd->part2.dtd_flags & 0x4) mode->flags |= DRM_MODE_FLAG_PVSYNC; } static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_encode encode; CTASSERT(sizeof(encode) == 2); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPP_ENCODE, &encode, sizeof(encode)); } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, uint8_t mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); } static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, uint8_t mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { int i, j; uint8_t set_buf_index[2]; uint8_t av_split; uint8_t buf_size; uint8_t buf[48]; uint8_t *pos; intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); for (i = 0; i <= av_split; i++) { set_buf_index[0] = i; set_buf_index[1] = 0; intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); intel_sdvo_read_response(encoder, &buf_size, 1); pos = buf; for (j = 0; j <= buf_size; j += 8) { intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, NULL, 0); intel_sdvo_read_response(encoder, pos, 8); pos += 8; } } } #endif static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) { struct dip_infoframe avi_if = { .type = DIP_TYPE_AVI, .ver = DIP_VERSION_AVI, .len = DIP_LEN_AVI, }; uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; uint8_t set_buf_index[2] = { 1, 0 }; - uint64_t *data = (uint64_t *)&avi_if; + uint8_t 
sdvo_data[4 + sizeof(avi_if.body.avi)]; + uint64_t *data = (uint64_t *)sdvo_data; unsigned i; intel_dip_infoframe_csum(&avi_if); + /* sdvo spec says that the ecc is handled by the hw, and it looks like + * we must not send the ecc field, either. */ + memcpy(sdvo_data, &avi_if, 3); + sdvo_data[3] = avi_if.checksum; + memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); + if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2)) return false; - for (i = 0; i < sizeof(avi_if); i += 8) { + for (i = 0; i < sizeof(sdvo_data); i += 8) { if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8)) return false; data++; } return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); } static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_tv_format format; uint32_t format_map; format_map = 1 << intel_sdvo->tv_format_index; memset(&format, 0, sizeof(format)); memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); CTASSERT(sizeof(format) == 6); return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TV_FORMAT, &format, sizeof(format)); } static bool intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, const struct drm_display_mode *mode) { struct intel_sdvo_dtd output_dtd; if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return false; intel_sdvo_get_dtd_from_mode(&output_dtd, mode); if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) return false; return true; } static bool intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* Reset the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) return false; if (!intel_sdvo_create_preferred_input_timing(intel_sdvo, mode->clock / 10, mode->hdisplay, mode->vdisplay)) return false; if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, &intel_sdvo->input_dtd)) return false; intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); return true; } static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, + struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); int multiplier; /* We need to construct preferred input timings based on our * output timings. To do that, we have to set the output * timings, even though this isn't really the right place in * the sequence to do it. Oh well. */ if (intel_sdvo->is_tv) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) return false; (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode); } else if (intel_sdvo->is_lvds) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, intel_sdvo->sdvo_lvds_fixed_mode)) return false; (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode); } /* Make the CRTC code factor in the SDVO pixel multiplier. The * SDVO device will factor out the multiplier during mode_set. 
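To make the pixel-multiplier handling concrete: intel_sdvo_get_pixel_multiplier(), defined earlier in this file, picks the smallest multiplier that lifts the dot clock to at least 100 MHz, and mode_set later converts the same value into the SDVO_CLOCK_RATE_MULT_1X/2X/4X setting. A small self-contained check of the rule, illustrative only and not part of this commit:

/* Mirrors the threshold rule from intel_sdvo_get_pixel_multiplier(). */
#include <assert.h>

static int sdvo_pixel_multiplier_sketch(int clock_khz)
{
	if (clock_khz >= 100000)
		return 1;
	else if (clock_khz >= 50000)
		return 2;
	else
		return 4;
}

int main(void)
{
	assert(sdvo_pixel_multiplier_sketch(25175) == 4);	/* 640x480@60: 100,700 kHz on the wire */
	assert(sdvo_pixel_multiplier_sketch(65000) == 2);	/* 1024x768@60: 130,000 kHz */
	assert(sdvo_pixel_multiplier_sketch(148500) == 1);	/* 1080p@60 is already fast enough */
	return 0;
}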
*/ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); return true; } static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd, output_dtd; int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); int rate; if (!mode) return; /* First, set the input mapping for the first input to our controlled * output. This is only correct if we're a single-input device, in * which case the first input is the output from the appropriate SDVO * channel on the motherboard. In a two-input device, the first input * will be SDVOB and the second SDVOC. */ in_out.in0 = intel_sdvo->attached_output; in_out.in1 = 0; intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); /* Set the output timings to the screen */ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; /* lvds has a special fixed output timing. */ if (intel_sdvo->is_lvds) intel_sdvo_get_dtd_from_mode(&output_dtd, intel_sdvo->sdvo_lvds_fixed_mode); else intel_sdvo_get_dtd_from_mode(&output_dtd, mode); (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) return; if (intel_sdvo->has_hdmi_monitor) { intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); intel_sdvo_set_avi_infoframe(intel_sdvo); } else intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); if (intel_sdvo->is_tv && !intel_sdvo_set_tv_format(intel_sdvo)) return; /* We have tried to get input timing in mode_fixup, and filled into * adjusted_mode. */ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); switch (pixel_multiplier) { default: case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; } if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate)) return; /* Set the SDVO control regs. */ if (INTEL_INFO(dev)->gen >= 4) { /* The real mode polarity is set by the SDVO commands, using * struct intel_sdvo_dtd. 
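As an example of what that means: the polarity travels in the DTD built by intel_sdvo_get_dtd_from_mode(), where dtd_flags starts at 0x18 and gains 0x2 for a positive hsync and 0x4 for a positive vsync (0x1e for a +hsync/+vsync mode), while on gen4+ the SDVOB/SDVOC register below is simply programmed with both ACTIVE_HIGH bits regardless of the mode's actual polarity.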
*/ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; if (intel_sdvo->is_hdmi) sdvox |= intel_sdvo->color_range; if (INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_BORDER_ENABLE; } else { sdvox = I915_READ(intel_sdvo->sdvo_reg); switch (intel_sdvo->sdvo_reg) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; break; case SDVOC: sdvox &= SDVOC_PRESERVE_MASK; break; } sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; } if (INTEL_PCH_TYPE(dev) >= PCH_CPT) sdvox |= TRANSCODER_CPT(intel_crtc->pipe); else sdvox |= TRANSCODER(intel_crtc->pipe); if (intel_sdvo->has_hdmi_audio) sdvox |= SDVO_AUDIO_ENABLE; if (INTEL_INFO(dev)->gen >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { /* done in crtc_mode_set as it lives inside the dpll register */ } else { sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); u32 temp; if (mode != DRM_MODE_DPMS_ON) { intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); if (mode == DRM_MODE_DPMS_OFF) { temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) != 0) { intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); } } } else { bool input1, input2; int i; u8 status; temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) == 0) intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) intel_wait_for_vblank(dev, intel_crtc->pipe); status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* Warn if the device reported failure to sync. * A lot of SDVO devices fail to notify of sync, but it's * a given it the status is a success, we succeeded. 
*/ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { DRM_DEBUG_KMS("First %s output reported failure to " "sync\n", SDVO_NAME(intel_sdvo)); } if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); } return; } static int intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (intel_sdvo->pixel_clock_min > mode->clock) return MODE_CLOCK_LOW; if (intel_sdvo->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; if (intel_sdvo->is_lvds) { if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) return MODE_PANEL; } return MODE_OK; } static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { CTASSERT(sizeof(*caps) == 8); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps))) return false; DRM_DEBUG_KMS("SDVO capabilities:\n" " vendor_id: %d\n" " device_id: %d\n" " device_rev_id: %d\n" " sdvo_version_major: %d\n" " sdvo_version_minor: %d\n" " sdvo_inputs_mask: %d\n" " smooth_scaling: %d\n" " sharp_scaling: %d\n" " up_scaling: %d\n" " down_scaling: %d\n" " stall_support: %d\n" " output_flags: %d\n", caps->vendor_id, caps->device_id, caps->device_rev_id, caps->sdvo_version_major, caps->sdvo_version_minor, caps->sdvo_inputs_mask, caps->smooth_scaling, caps->sharp_scaling, caps->up_scaling, caps->down_scaling, caps->stall_support, caps->output_flags); return true; } static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) { struct drm_device *dev = intel_sdvo->base.base.dev; u8 response[2]; /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise * on the line. */ if (IS_I945G(dev) || IS_I945GM(dev)) return false; return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, &response, 2) && response[0]; } static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); } static bool intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) { /* Is there more than one type of output? */ return bitcount16(intel_sdvo->caps.output_flags) > 1; } static struct edid * intel_sdvo_get_edid(struct drm_connector *connector) { struct intel_sdvo *sdvo = intel_attached_sdvo(connector); return drm_get_edid(connector, sdvo->ddc); } /* Mac mini hack -- use the same DDC as the analog connector */ static struct edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; return drm_get_edid(connector, - dev_priv->gmbus[dev_priv->crt_ddc_pin]); + intel_gmbus_get_adapter(dev_priv, + dev_priv->crt_ddc_pin)); } static enum drm_connector_status intel_sdvo_tmds_sink_detect(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); enum drm_connector_status status; struct edid *edid; edid = intel_sdvo_get_edid(connector); if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { u8 ddc, saved_ddc = intel_sdvo->ddc_bus; /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. 
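The loop that follows walks the lower-numbered DDC buses when the current one returned no EDID, and deliberately stops before bus 1, the SPD ROM mentioned above. For example, with ddc_bus = 0x08 the probe order is 0x04, then 0x02, and the loop exits before trying 0x01; if none of the candidates yields an EDID, ddc_bus is restored to the saved value.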
*/ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { intel_sdvo->ddc_bus = ddc; edid = intel_sdvo_get_edid(connector); if (edid) break; } /* * If we found the EDID on the other bus, * assume that is the correct DDC bus. */ if (edid == NULL) intel_sdvo->ddc_bus = saved_ddc; } /* * When there is no edid and no monitor is connected with VGA * port, try to use the CRT ddc to read the EDID for DVI-connector. */ if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); status = connector_status_unknown; if (edid != NULL) { /* DDC bus is shared, match EDID to connector type */ if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; if (intel_sdvo->is_hdmi) { intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); } } else status = connector_status_disconnected; connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } if (status == connector_status_connected) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO) intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON); } return status; } static bool intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, struct edid *edid) { bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); bool connector_is_digital = !!IS_DIGITAL(sdvo); DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n", connector_is_digital, monitor_is_digital); return connector_is_digital == monitor_is_digital; } static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { uint16_t response; struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) return connector_status_unknown; /* add 30ms delay when the output type might be TV */ - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) + if (intel_sdvo->caps.output_flags & SDVO_TV_MASK) drm_msleep(30, "915svo"); if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) return connector_status_unknown; DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", response & 0xff, response >> 8, intel_sdvo_connector->output_flag); if (response == 0) return connector_status_disconnected; intel_sdvo->attached_output = response; intel_sdvo->has_hdmi_monitor = false; intel_sdvo->has_hdmi_audio = false; if ((intel_sdvo_connector->output_flag & response) == 0) ret = connector_status_disconnected; else if (IS_TMDS(intel_sdvo_connector)) ret = intel_sdvo_tmds_sink_detect(connector); else { struct edid *edid; /* if we have an edid check it matches the connection */ edid = intel_sdvo_get_edid(connector); if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { if (intel_sdvo_connector_matches_edid(intel_sdvo_connector, edid)) ret = connector_status_connected; else ret = connector_status_disconnected; connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } else ret = connector_status_connected; } /* May update encoder flag for like clock for SDVO TV, etc.*/ if (ret == connector_status_connected) { intel_sdvo->is_tv = false; intel_sdvo->is_lvds = false; intel_sdvo->base.needs_tv_clock = false; if (response & SDVO_TV_MASK) { intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; } if 
(response & SDVO_LVDS_MASK) intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL; } return ret; } static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { struct edid *edid; /* set the bus switch and get the modes */ edid = intel_sdvo_get_edid(connector); /* * Mac mini hack. On this device, the DVI-I connector shares one DDC * link between analog and digital outputs. So, if the regular SDVO * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. */ if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), edid)) { drm_mode_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); } connector->display_info.raw_edid = NULL; free(edid, DRM_MEM_KMS); } } /* * Set of SDVO TV modes. * Note! This is in reply order (see loop in get_tv_modes). * XXX: all 60Hz refresh? */ static const struct drm_display_mode sdvo_tv_modes[] = { { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, 416, 0, 200, 201, 232, 233, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384, 416, 0, 240, 241, 272, 273, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464, 496, 0, 300, 301, 332, 333, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704, 736, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704, 736, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704, 736, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768, 800, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768, 800, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784, 816, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784, 816, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784, 816, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784, 816, 0, 540, 541, 572, 573, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784, 816, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832, 864, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864, 896, 0, 600, 601, 632, 633, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896, 928, 0, 624, 625, 656, 657, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984, 1016, 0, 766, 767, 798, 799, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1024x768", 
DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088, 1120, 0, 768, 769, 800, 801, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344, 1376, 0, 1024, 1025, 1056, 1057, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, }; static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0, format_map = 0; int i; /* Read the list of supported input resolutions for the selected TV * format. */ format_map = 1 << intel_sdvo->tv_format_index; memcpy(&tv_res, &format_map, min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; CTASSERT(sizeof(tv_res) == 3); if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, &tv_res, sizeof(tv_res))) return; if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) return; for (i = 0; i < DRM_ARRAY_SIZE(sdvo_tv_modes); i++) if (reply & (1 << i)) { struct drm_display_mode *nmode; nmode = drm_mode_duplicate(connector->dev, &sdvo_tv_modes[i]); if (nmode) drm_mode_probed_add(connector, nmode); } } static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_display_mode *newmode; /* * Attempt to get the mode list from DDC. * Assume that the preferred modes are * arranged in priority order. */ intel_ddc_get_modes(connector, intel_sdvo->i2c); if (!list_empty(&connector->probed_modes)) goto end; /* Fetch modes from VBT */ if (dev_priv->sdvo_lvds_vbt_mode != NULL) { newmode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode); if (newmode != NULL) { /* Guarantee the mode is preferred */ newmode->type = (DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER); drm_mode_probed_add(connector, newmode); } } end: list_for_each_entry(newmode, &connector->probed_modes, head) { if (newmode->type & DRM_MODE_TYPE_PREFERRED) { intel_sdvo->sdvo_lvds_fixed_mode = drm_mode_duplicate(connector->dev, newmode); - drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, - 0); - intel_sdvo->is_lvds = true; break; } } } static int intel_sdvo_get_modes(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (IS_TV(intel_sdvo_connector)) intel_sdvo_get_tv_modes(connector); else if (IS_LVDS(intel_sdvo_connector)) intel_sdvo_get_lvds_modes(connector); else intel_sdvo_get_ddc_modes(connector); return !list_empty(&connector->probed_modes); } static void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct drm_device *dev = connector->dev; if (intel_sdvo_connector->left) drm_property_destroy(dev, intel_sdvo_connector->left); if (intel_sdvo_connector->right) drm_property_destroy(dev, intel_sdvo_connector->right); if (intel_sdvo_connector->top) drm_property_destroy(dev, intel_sdvo_connector->top); if (intel_sdvo_connector->bottom) drm_property_destroy(dev, intel_sdvo_connector->bottom); if (intel_sdvo_connector->hpos) drm_property_destroy(dev, intel_sdvo_connector->hpos); if (intel_sdvo_connector->vpos) drm_property_destroy(dev, intel_sdvo_connector->vpos); if (intel_sdvo_connector->saturation) drm_property_destroy(dev, 
intel_sdvo_connector->saturation); if (intel_sdvo_connector->contrast) drm_property_destroy(dev, intel_sdvo_connector->contrast); if (intel_sdvo_connector->hue) drm_property_destroy(dev, intel_sdvo_connector->hue); if (intel_sdvo_connector->sharpness) drm_property_destroy(dev, intel_sdvo_connector->sharpness); if (intel_sdvo_connector->flicker_filter) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter); if (intel_sdvo_connector->flicker_filter_2d) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d); if (intel_sdvo_connector->flicker_filter_adaptive) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive); if (intel_sdvo_connector->tv_luma_filter) drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter); if (intel_sdvo_connector->tv_chroma_filter) drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter); if (intel_sdvo_connector->dot_crawl) drm_property_destroy(dev, intel_sdvo_connector->dot_crawl); if (intel_sdvo_connector->brightness) drm_property_destroy(dev, intel_sdvo_connector->brightness); } static void intel_sdvo_destroy(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->tv_format) drm_property_destroy(connector->dev, intel_sdvo_connector->tv_format); intel_sdvo_destroy_enhance_property(connector); #if 0 drm_sysfs_connector_remove(connector); #endif drm_connector_cleanup(connector); free(connector, DRM_MEM_KMS); } static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct edid *edid; bool has_audio = false; if (!intel_sdvo->is_hdmi) return false; edid = intel_sdvo_get_edid(connector); if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); return has_audio; } static int intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; uint16_t temp_value; uint8_t cmd; int ret; ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; if (property == dev_priv->force_audio_property) { int i = val; bool has_audio; if (i == intel_sdvo_connector->force_audio) return 0; intel_sdvo_connector->force_audio = i; if (i == HDMI_AUDIO_AUTO) has_audio = intel_sdvo_detect_hdmi_audio(connector); else has_audio = (i == HDMI_AUDIO_ON); if (has_audio == intel_sdvo->has_hdmi_audio) return 0; intel_sdvo->has_hdmi_audio = has_audio; goto done; } if (property == dev_priv->broadcast_rgb_property) { if (val == !!intel_sdvo->color_range) return 0; intel_sdvo->color_range = val ? 
SDVO_COLOR_RANGE_16_235 : 0; goto done; } #define CHECK_PROPERTY(name, NAME) \ if (intel_sdvo_connector->name == property) { \ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \ cmd = SDVO_CMD_SET_##NAME; \ intel_sdvo_connector->cur_##name = temp_value; \ goto set_value; \ } if (property == intel_sdvo_connector->tv_format) { if (val >= TV_FORMAT_NUM) return -EINVAL; if (intel_sdvo->tv_format_index == intel_sdvo_connector->tv_format_supported[val]) return 0; intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val]; goto done; } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { temp_value = val; if (intel_sdvo_connector->left == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->right, val); if (intel_sdvo_connector->left_margin == temp_value) return 0; intel_sdvo_connector->left_margin = temp_value; intel_sdvo_connector->right_margin = temp_value; temp_value = intel_sdvo_connector->max_hscan - intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->right == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->left, val); if (intel_sdvo_connector->right_margin == temp_value) return 0; intel_sdvo_connector->left_margin = temp_value; intel_sdvo_connector->right_margin = temp_value; temp_value = intel_sdvo_connector->max_hscan - intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->top == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->bottom, val); if (intel_sdvo_connector->top_margin == temp_value) return 0; intel_sdvo_connector->top_margin = temp_value; intel_sdvo_connector->bottom_margin = temp_value; temp_value = intel_sdvo_connector->max_vscan - intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } else if (intel_sdvo_connector->bottom == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->top, val); if (intel_sdvo_connector->bottom_margin == temp_value) return 0; intel_sdvo_connector->top_margin = temp_value; intel_sdvo_connector->bottom_margin = temp_value; temp_value = intel_sdvo_connector->max_vscan - intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } CHECK_PROPERTY(hpos, HPOS) CHECK_PROPERTY(vpos, VPOS) CHECK_PROPERTY(saturation, SATURATION) CHECK_PROPERTY(contrast, CONTRAST) CHECK_PROPERTY(hue, HUE) CHECK_PROPERTY(brightness, BRIGHTNESS) CHECK_PROPERTY(sharpness, SHARPNESS) CHECK_PROPERTY(flicker_filter, FLICKER_FILTER) CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D) CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE) CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER) CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER) CHECK_PROPERTY(dot_crawl, DOT_CRAWL) } return -EINVAL; /* unknown property */ set_value: if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2)) return -EIO; done: if (intel_sdvo->base.base.crtc) { struct drm_crtc *crtc = intel_sdvo->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } return 0; #undef CHECK_PROPERTY } static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { .dpms = intel_sdvo_dpms, .mode_fixup = intel_sdvo_mode_fixup, .prepare = intel_encoder_prepare, .mode_set = intel_sdvo_mode_set, .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .dpms = 
drm_helper_connector_dpms, .detect = intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_sdvo_set_property, .destroy = intel_sdvo_destroy, }; static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, .best_encoder = intel_best_encoder, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) drm_mode_destroy(encoder->dev, intel_sdvo->sdvo_lvds_fixed_mode); device_delete_child(intel_sdvo->base.base.dev->device, intel_sdvo->ddc_iic_bus); intel_encoder_destroy(encoder); } static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { .destroy = intel_sdvo_enc_destroy, }; static void intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) { uint16_t mask = 0; unsigned int num_bits; /* Make a mask of outputs less than or equal to our own priority in the * list. */ switch (sdvo->controlled_output) { case SDVO_OUTPUT_LVDS1: mask |= SDVO_OUTPUT_LVDS1; case SDVO_OUTPUT_LVDS0: mask |= SDVO_OUTPUT_LVDS0; case SDVO_OUTPUT_TMDS1: mask |= SDVO_OUTPUT_TMDS1; case SDVO_OUTPUT_TMDS0: mask |= SDVO_OUTPUT_TMDS0; case SDVO_OUTPUT_RGB1: mask |= SDVO_OUTPUT_RGB1; case SDVO_OUTPUT_RGB0: mask |= SDVO_OUTPUT_RGB0; break; } /* Count bits to find what number we are in the priority list. */ mask &= sdvo->caps.output_flags; num_bits = bitcount16(mask); /* If more than 3 outputs, default to DDC bus 3 for now. */ if (num_bits > 3) num_bits = 3; /* Corresponds to SDVO_CONTROL_BUS_DDCx */ sdvo->ddc_bus = 1 << num_bits; } /** * Choose the appropriate DDC bus for control bus switch command for this * SDVO output based on the controlled output. * * DDC bus number assignment is in a priority order of RGB outputs, then TMDS * outputs, then LVDS outputs. 
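A worked example of intel_sdvo_guess_ddc_bus() above, with purely illustrative capability flags: if controlled_output is SDVO_OUTPUT_TMDS0 and caps.output_flags advertises RGB0 | TMDS0 | TMDS1, the fall-through switch builds TMDS0 | RGB1 | RGB0, the intersection with the capability flags leaves two bits set (RGB0 and TMDS0), so num_bits = 2 and ddc_bus = 1 << 2 = 0x04, which the code maps onto one of the SDVO_CONTROL_BUS_DDCx values.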
*/ static void intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, struct intel_sdvo *sdvo, u32 reg) { struct sdvo_device_mapping *mapping; - if (IS_SDVOB(reg)) + if (sdvo->is_sdvob) mapping = &(dev_priv->sdvo_mappings[0]); else mapping = &(dev_priv->sdvo_mappings[1]); if (mapping->initialized) sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); else intel_sdvo_guess_ddc_bus(sdvo); } static void intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, struct intel_sdvo *sdvo, u32 reg) { struct sdvo_device_mapping *mapping; u8 pin; - if (IS_SDVOB(reg)) + if (sdvo->is_sdvob) mapping = &dev_priv->sdvo_mappings[0]; else mapping = &dev_priv->sdvo_mappings[1]; pin = GMBUS_PORT_DPB; if (mapping->initialized) pin = mapping->i2c_pin; - if (pin < GMBUS_NUM_PORTS) { - sdvo->i2c = dev_priv->gmbus[pin]; + if (intel_gmbus_is_port_valid(pin)) { + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); intel_gmbus_force_bit(sdvo->i2c, true); } else { - sdvo->i2c = dev_priv->gmbus[GMBUS_PORT_DPB]; + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); } } static bool intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) { return intel_sdvo_check_supp_encode(intel_sdvo); } static u8 -intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) +intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) { struct drm_i915_private *dev_priv = dev->dev_private; struct sdvo_device_mapping *my_mapping, *other_mapping; - if (IS_SDVOB(sdvo_reg)) { + if (sdvo->is_sdvob) { my_mapping = &dev_priv->sdvo_mappings[0]; other_mapping = &dev_priv->sdvo_mappings[1]; } else { my_mapping = &dev_priv->sdvo_mappings[1]; other_mapping = &dev_priv->sdvo_mappings[0]; } /* If the BIOS described our SDVO device, take advantage of it. */ if (my_mapping->slave_addr) return my_mapping->slave_addr; /* If the BIOS only described a different SDVO device, use the * address that it isn't using. */ if (other_mapping->slave_addr) { if (other_mapping->slave_addr == 0x70) return 0x72; else return 0x70; } /* No SDVO device info is found for another DVO port, * so use mapping assumption we had before BIOS parsing. 
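 * That assumption is the conventional pairing used throughout this file:
 * slave address 0x70 for the SDVOB port and 0x72 for SDVOC, matching the
 * "use the address the other device isn't using" fallback above.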
*/ - if (IS_SDVOB(sdvo_reg)) + if (sdvo->is_sdvob) return 0x70; else return 0x72; } static void intel_sdvo_connector_init(struct intel_sdvo_connector *connector, struct intel_sdvo *encoder) { drm_connector_init(encoder->base.base.dev, &connector->base.base, &intel_sdvo_connector_funcs, connector->base.base.connector_type); drm_connector_helper_add(&connector->base.base, &intel_sdvo_connector_helper_funcs); connector->base.base.interlace_allowed = 1; connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; intel_connector_attach_encoder(&connector->base, &encoder->base); #if 0 drm_sysfs_connector_add(&connector->base.base); #endif } static void intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) { struct drm_device *dev = connector->base.base.dev; intel_attach_force_audio_property(&connector->base.base); if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) intel_attach_broadcast_rgb_property(&connector->base.base); } static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); if (device == 0) { intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; } else if (device == 1) { intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; } intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { connector->polled = DRM_CONNECTOR_POLL_HPD; intel_sdvo->hotplug_active[0] |= 1 << device; /* Some SDVO devices have one-shot hotplug interrupts. * Ensure that they get re-enabled when an interrupt happens. 
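 * The hot_plug hook assigned below (intel_sdvo_enable_hotplug) re-arms the
 * device's hotplug notification; it is also called once here so that
 * notification is enabled from the start.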
*/ intel_encoder->hot_plug = intel_sdvo_enable_hotplug; intel_sdvo_enable_hotplug(intel_encoder); } else connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (intel_sdvo->is_hdmi) intel_sdvo_add_hdmi_properties(intel_sdvo_connector); return true; } static bool intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; intel_sdvo->controlled_output |= type; intel_sdvo_connector->output_flag = type; intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) goto err; if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; return true; err: intel_sdvo_destroy(connector); return false; } static bool intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; connector->polled = DRM_CONNECTOR_POLL_CONNECT; encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; if (device == 0) { intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; } else if (device == 1) { intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; } intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); return true; } static bool intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; if (device == 0) { intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; } else if (device == 1) { intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; 
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; } intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; return true; err: intel_sdvo_destroy(connector); return false; } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) { intel_sdvo->is_tv = false; intel_sdvo->base.needs_tv_clock = false; intel_sdvo->is_lvds = false; /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ if (flags & SDVO_OUTPUT_TMDS0) if (!intel_sdvo_dvi_init(intel_sdvo, 0)) return false; if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) if (!intel_sdvo_dvi_init(intel_sdvo, 1)) return false; /* TV has no XXX1 function block */ if (flags & SDVO_OUTPUT_SVID0) if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0)) return false; if (flags & SDVO_OUTPUT_CVBS0) if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0)) return false; + if (flags & SDVO_OUTPUT_YPRPB0) + if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0)) + return false; + if (flags & SDVO_OUTPUT_RGB0) if (!intel_sdvo_analog_init(intel_sdvo, 0)) return false; if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) if (!intel_sdvo_analog_init(intel_sdvo, 1)) return false; if (flags & SDVO_OUTPUT_LVDS0) if (!intel_sdvo_lvds_init(intel_sdvo, 0)) return false; if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) if (!intel_sdvo_lvds_init(intel_sdvo, 1)) return false; if ((flags & SDVO_OUTPUT_MASK) == 0) { unsigned char bytes[2]; intel_sdvo->controlled_output = 0; memcpy(bytes, &intel_sdvo->caps.output_flags, 2); DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", SDVO_NAME(intel_sdvo), bytes[0], bytes[1]); return false; } intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); return true; } static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type) { struct drm_device *dev = intel_sdvo->base.base.dev; struct intel_sdvo_tv_format format; uint32_t format_map, i; if (!intel_sdvo_set_target_output(intel_sdvo, type)) return false; CTASSERT(sizeof(format) == 6); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, &format, sizeof(format))) return false; memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format))); if (format_map == 0) return false; intel_sdvo_connector->format_supported_num = 0; for (i = 0 ; i < TV_FORMAT_NUM; i++) if (format_map & (1 << i)) intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i; intel_sdvo_connector->tv_format = drm_property_create(dev, DRM_MODE_PROP_ENUM, "mode", intel_sdvo_connector->format_supported_num); if (!intel_sdvo_connector->tv_format) return false; for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) drm_property_add_enum( intel_sdvo_connector->tv_format, i, i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; drm_connector_attach_property(&intel_sdvo_connector->base.base, intel_sdvo_connector->tv_format, 0); return true; } #define ENHANCEMENT(name, NAME) do { \ if (enhancements.name) { \ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \ return false; \ intel_sdvo_connector->max_##name = data_value[0]; \ intel_sdvo_connector->cur_##name = 
response; \ intel_sdvo_connector->name = \ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ if (!intel_sdvo_connector->name) return false; \ drm_connector_attach_property(connector, \ intel_sdvo_connector->name, \ intel_sdvo_connector->cur_##name); \ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ data_value[0], data_value[1], response); \ } \ } while (0) static bool intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; /* when horizontal overscan is supported, Add the left/right property */ if (enhancements.overscan_h) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_OVERSCAN_H, &data_value, 4)) return false; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_OVERSCAN_H, &response, 2)) return false; intel_sdvo_connector->max_hscan = data_value[0]; intel_sdvo_connector->left_margin = data_value[0] - response; intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; intel_sdvo_connector->left = drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]); if (!intel_sdvo_connector->left) return false; drm_connector_attach_property(connector, intel_sdvo_connector->left, intel_sdvo_connector->left_margin); intel_sdvo_connector->right = drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]); if (!intel_sdvo_connector->right) return false; drm_connector_attach_property(connector, intel_sdvo_connector->right, intel_sdvo_connector->right_margin); DRM_DEBUG_KMS("h_overscan: max %d, " "default %d, current %d\n", data_value[0], data_value[1], response); } if (enhancements.overscan_v) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_OVERSCAN_V, &data_value, 4)) return false; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_OVERSCAN_V, &response, 2)) return false; intel_sdvo_connector->max_vscan = data_value[0]; intel_sdvo_connector->top_margin = data_value[0] - response; intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; intel_sdvo_connector->top = drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]); if (!intel_sdvo_connector->top) return false; drm_connector_attach_property(connector, intel_sdvo_connector->top, intel_sdvo_connector->top_margin); intel_sdvo_connector->bottom = drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]); if (!intel_sdvo_connector->bottom) return false; drm_connector_attach_property(connector, intel_sdvo_connector->bottom, intel_sdvo_connector->bottom_margin); DRM_DEBUG_KMS("v_overscan: max %d, " "default %d, current %d\n", data_value[0], data_value[1], response); } ENHANCEMENT(hpos, HPOS); ENHANCEMENT(vpos, VPOS); ENHANCEMENT(saturation, SATURATION); ENHANCEMENT(contrast, CONTRAST); ENHANCEMENT(hue, HUE); ENHANCEMENT(sharpness, SHARPNESS); ENHANCEMENT(brightness, BRIGHTNESS); ENHANCEMENT(flicker_filter, FLICKER_FILTER); ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE); ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D); ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER); ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER); if (enhancements.dot_crawl) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2)) return false; intel_sdvo_connector->max_dot_crawl = 1; intel_sdvo_connector->cur_dot_crawl = response & 0x1; intel_sdvo_connector->dot_crawl = 
drm_property_create_range(dev, 0, "dot_crawl", 0, 1); if (!intel_sdvo_connector->dot_crawl) return false; drm_connector_attach_property(connector, intel_sdvo_connector->dot_crawl, intel_sdvo_connector->cur_dot_crawl); DRM_DEBUG_KMS("dot crawl: current %d\n", response); } return true; } static bool intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; ENHANCEMENT(brightness, BRIGHTNESS); return true; } #undef ENHANCEMENT static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector) { union { struct intel_sdvo_enhancements_reply reply; uint16_t response; } enhancements; CTASSERT(sizeof(enhancements) == 2); enhancements.response = 0; intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, &enhancements, sizeof(enhancements)); if (enhancements.response == 0) { DRM_DEBUG_KMS("No enhancement is supported\n"); return true; } if (IS_TV(intel_sdvo_connector)) return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); else if (IS_LVDS(intel_sdvo_connector)) return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); else return true; } struct intel_sdvo_ddc_proxy_sc { struct intel_sdvo *intel_sdvo; device_t port; }; static int intel_sdvo_ddc_proxy_probe(device_t idev) { return (BUS_PROBE_DEFAULT); } static int intel_sdvo_ddc_proxy_attach(device_t idev) { struct intel_sdvo_ddc_proxy_sc *sc; sc = device_get_softc(idev); sc->port = device_add_child(idev, "iicbus", -1); if (sc->port == NULL) return (ENXIO); device_quiet(sc->port); bus_generic_attach(idev); return (0); } static int intel_sdvo_ddc_proxy_detach(device_t idev) { struct intel_sdvo_ddc_proxy_sc *sc; device_t port; sc = device_get_softc(idev); port = sc->port; bus_generic_detach(idev); if (port != NULL) device_delete_child(idev, port); return (0); } static int intel_sdvo_ddc_proxy_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr) { struct intel_sdvo_ddc_proxy_sc *sc; struct intel_sdvo *sdvo; sc = device_get_softc(idev); sdvo = sc->intel_sdvo; return (IICBUS_RESET(device_get_parent(sdvo->i2c), speed, addr, oldaddr)); } static int intel_sdvo_ddc_proxy_transfer(device_t idev, struct iic_msg *msgs, uint32_t num) { struct intel_sdvo_ddc_proxy_sc *sc; struct intel_sdvo *sdvo; sc = device_get_softc(idev); sdvo = sc->intel_sdvo; if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus)) return (EIO); return (iicbus_transfer(sdvo->i2c, msgs, num)); } static bool intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_device *dev, int sdvo_reg) { struct intel_sdvo_ddc_proxy_sc *sc; int ret; sdvo->ddc_iic_bus = device_add_child(dev->device, "intel_sdvo_ddc_proxy", sdvo_reg); if (sdvo->ddc_iic_bus == NULL) { DRM_ERROR("cannot create ddc proxy bus %d\n", sdvo_reg); return (false); } device_quiet(sdvo->ddc_iic_bus); ret = device_probe_and_attach(sdvo->ddc_iic_bus); if (ret != 0) { DRM_ERROR("cannot attach proxy bus %d error %d\n", sdvo_reg, ret); device_delete_child(dev->device, sdvo->ddc_iic_bus); return (false); } sc = device_get_softc(sdvo->ddc_iic_bus); sc->intel_sdvo = sdvo; sdvo->ddc = sc->port; return (true); } static device_method_t intel_sdvo_ddc_proxy_methods[] = { DEVMETHOD(device_probe, 
intel_sdvo_ddc_proxy_probe), DEVMETHOD(device_attach, intel_sdvo_ddc_proxy_attach), DEVMETHOD(device_detach, intel_sdvo_ddc_proxy_detach), DEVMETHOD(iicbus_reset, intel_sdvo_ddc_proxy_reset), DEVMETHOD(iicbus_transfer, intel_sdvo_ddc_proxy_transfer), DEVMETHOD_END }; static driver_t intel_sdvo_ddc_proxy_driver = { "intel_sdvo_ddc_proxy", intel_sdvo_ddc_proxy_methods, sizeof(struct intel_sdvo_ddc_proxy_sc) }; static devclass_t intel_sdvo_devclass; DRIVER_MODULE_ORDERED(intel_sdvo_ddc_proxy, drmn, intel_sdvo_ddc_proxy_driver, intel_sdvo_devclass, 0, 0, SI_ORDER_FIRST); -bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) +bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; int i; intel_sdvo = malloc(sizeof(struct intel_sdvo), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_sdvo->sdvo_reg = sdvo_reg; - intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; + intel_sdvo->is_sdvob = is_sdvob; + intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev, sdvo_reg)) { free(intel_sdvo, DRM_MEM_KMS); return false; } /* encoder type will be decided later */ intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { u8 byte; if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { - DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", - IS_SDVOB(sdvo_reg) ? 'B' : 'C'); + DRM_DEBUG_KMS("No SDVO device found on %s\n", + SDVO_NAME(intel_sdvo)); goto err; } } - if (IS_SDVOB(sdvo_reg)) + if (intel_sdvo->is_sdvob) dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; else dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; /* Set up hotplug command - note paranoia about contents of reply. * We assume that the hardware is in a sane state, and only touch * the bits we think we understand. */ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); intel_sdvo->hotplug_active[0] &= ~0x3; - if (!intel_sdvo_output_setup(intel_sdvo, - intel_sdvo->caps.output_flags)) { - DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", - IS_SDVOB(sdvo_reg) ? 'B' : 'C'); - goto err; - } + if (intel_sdvo_output_setup(intel_sdvo, + intel_sdvo->caps.output_flags) != true) { + DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", + SDVO_NAME(intel_sdvo)); + goto err; + } intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) goto err; if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, &intel_sdvo->pixel_clock_min, &intel_sdvo->pixel_clock_max)) goto err; DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " "input 1: %c, input 2: %c, " "output 1: %c, output 2: %c\n", SDVO_NAME(intel_sdvo), intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id, intel_sdvo->caps.device_rev_id, intel_sdvo->pixel_clock_min / 1000, intel_sdvo->pixel_clock_max / 1000, (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 
'Y' : 'N', (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', /* check currently supported outputs */ intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); return true; err: drm_encoder_cleanup(&intel_encoder->base); free(intel_sdvo, DRM_MEM_KMS); return false; } Index: head/sys/dev/drm2/i915/intel_sprite.c =================================================================== --- head/sys/dev/drm2/i915/intel_sprite.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_sprite.c (revision 277487) @@ -1,669 +1,705 @@ /* * Copyright © 2011 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Jesse Barnes * * New plane/sprite handling. * * The older chips had a separate interface for programming plane related * registers; newer ones are much simpler and we can use the new DRM plane * support. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include static void ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; int pixel_size; sprctl = I915_READ(SPRCTL(pipe)); /* Mask out pixel format bits in case we change it */ sprctl &= ~SPRITE_PIXFORMAT_MASK; sprctl &= ~SPRITE_RGB_ORDER_RGBX; sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK; switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: sprctl |= SPRITE_FORMAT_RGBX888; pixel_size = 4; break; case DRM_FORMAT_XRGB8888: sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; pixel_size = 4; break; case DRM_FORMAT_YUYV: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; pixel_size = 2; break; case DRM_FORMAT_YVYU: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; pixel_size = 2; break; case DRM_FORMAT_UYVY: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; pixel_size = 2; break; case DRM_FORMAT_VYUY: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; pixel_size = 2; break; default: DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); sprctl |= DVS_FORMAT_RGBX888; pixel_size = 4; break; } if (obj->tiling_mode != I915_TILING_NONE) sprctl |= SPRITE_TILED; /* must disable */ sprctl |= SPRITE_TRICKLE_FEED_DISABLE; sprctl |= SPRITE_ENABLE; /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); /* * IVB workaround: must disable low power watermarks for at least * one frame before enabling scaling. LP watermarks can be re-enabled * when scaling is disabled. */ if (crtc_w != src_w || crtc_h != src_h) { - dev_priv->sprite_scaling_enabled = true; - sandybridge_update_wm(dev); - intel_wait_for_vblank(dev, pipe); + if (!dev_priv->sprite_scaling_enabled) { + dev_priv->sprite_scaling_enabled = true; + intel_update_watermarks(dev); + intel_wait_for_vblank(dev, pipe); + } sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; } else { - dev_priv->sprite_scaling_enabled = false; - /* potentially re-enable LP watermarks */ - sandybridge_update_wm(dev); + if (dev_priv->sprite_scaling_enabled) { + dev_priv->sprite_scaling_enabled = false; + /* potentially re-enable LP watermarks */ + intel_update_watermarks(dev); + } } I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); if (obj->tiling_mode != I915_TILING_NONE) { I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); } else { unsigned long offset; offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); I915_WRITE(SPRLINOFF(pipe), offset); } I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); - I915_WRITE(SPRSURF(pipe), obj->gtt_offset); + I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); POSTING_READ(SPRSURF(pipe)); } static void ivb_disable_plane(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); /* Can't leave the scaler enabled... 
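 * With the sprite off there is no scaling either, so the code added below
 * also clears sprite_scaling_enabled and calls intel_update_watermarks(),
 * letting the low-power watermarks disabled by the IVB scaling workaround
 * in ivb_update_plane() be restored.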
*/ I915_WRITE(SPRSCALE(pipe), 0); /* Activate double buffered register update */ - I915_WRITE(SPRSURF(pipe), 0); + I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); POSTING_READ(SPRSURF(pipe)); + + dev_priv->sprite_scaling_enabled = false; + intel_update_watermarks(dev); } static int ivb_update_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane; u32 sprctl; int ret = 0; intel_plane = to_intel_plane(plane); I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value); I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value); I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask); sprctl = I915_READ(SPRCTL(intel_plane->pipe)); sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY); if (key->flags & I915_SET_COLORKEY_DESTINATION) sprctl |= SPRITE_DEST_KEY; else if (key->flags & I915_SET_COLORKEY_SOURCE) sprctl |= SPRITE_SOURCE_KEY; I915_WRITE(SPRCTL(intel_plane->pipe), sprctl); POSTING_READ(SPRKEYMSK(intel_plane->pipe)); return ret; } static void ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane; u32 sprctl; intel_plane = to_intel_plane(plane); key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe)); key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe)); key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe)); key->flags = 0; sprctl = I915_READ(SPRCTL(intel_plane->pipe)); if (sprctl & SPRITE_DEST_KEY) key->flags = I915_SET_COLORKEY_DESTINATION; else if (sprctl & SPRITE_SOURCE_KEY) key->flags = I915_SET_COLORKEY_SOURCE; else key->flags = I915_SET_COLORKEY_NONE; } static void -snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, +ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe, pixel_size; - u32 dvscntr, dvsscale = 0; + u32 dvscntr, dvsscale; dvscntr = I915_READ(DVSCNTR(pipe)); /* Mask out pixel format bits in case we change it */ dvscntr &= ~DVS_PIXFORMAT_MASK; dvscntr &= ~DVS_RGB_ORDER_XBGR; dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK; switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; pixel_size = 4; break; case DRM_FORMAT_XRGB8888: dvscntr |= DVS_FORMAT_RGBX888; pixel_size = 4; break; case DRM_FORMAT_YUYV: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; pixel_size = 2; break; case DRM_FORMAT_YVYU: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; pixel_size = 2; break; case DRM_FORMAT_UYVY: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; pixel_size = 2; break; case DRM_FORMAT_VYUY: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; pixel_size = 2; break; default: DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); dvscntr |= DVS_FORMAT_RGBX888; pixel_size = 4; break; } if (obj->tiling_mode != I915_TILING_NONE) dvscntr |= DVS_TILED; - /* must disable */ - dvscntr |= DVS_TRICKLE_FEED_DISABLE; + if (IS_GEN6(dev)) + dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ dvscntr |= DVS_ENABLE; /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; 
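	/*
	 * Editorial note: the dvsscale setup below (added in this revision)
	 * enables the DVS scaler unconditionally on Gen5 (ILK) and otherwise
	 * only when the source and destination sizes differ.
	 */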
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); - if (crtc_w != src_w || crtc_h != src_h) + dvsscale = 0; + if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); if (obj->tiling_mode != I915_TILING_NONE) { I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); } else { unsigned long offset; offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); I915_WRITE(DVSLINOFF(pipe), offset); } I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); - I915_WRITE(DVSSURF(pipe), obj->gtt_offset); + I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); POSTING_READ(DVSSURF(pipe)); } static void -snb_disable_plane(struct drm_plane *plane) +ilk_disable_plane(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE); /* Disable the scaler */ I915_WRITE(DVSSCALE(pipe), 0); /* Flush double buffered register updates */ - I915_WRITE(DVSSURF(pipe), 0); + I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); POSTING_READ(DVSSURF(pipe)); } static void intel_enable_primary(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int reg = DSPCNTR(intel_crtc->plane); I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); } static void intel_disable_primary(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int reg = DSPCNTR(intel_crtc->plane); I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); } static int -snb_update_colorkey(struct drm_plane *plane, +ilk_update_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane; u32 dvscntr; int ret = 0; intel_plane = to_intel_plane(plane); I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value); I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value); I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask); dvscntr = I915_READ(DVSCNTR(intel_plane->pipe)); dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY); if (key->flags & I915_SET_COLORKEY_DESTINATION) dvscntr |= DVS_DEST_KEY; else if (key->flags & I915_SET_COLORKEY_SOURCE) dvscntr |= DVS_SOURCE_KEY; I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr); POSTING_READ(DVSKEYMSK(intel_plane->pipe)); return ret; } static void -snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) +ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane; u32 dvscntr; intel_plane = to_intel_plane(plane); key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe)); key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe)); key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe)); key->flags = 0; dvscntr = I915_READ(DVSCNTR(intel_plane->pipe)); if (dvscntr & DVS_DEST_KEY) key->flags = I915_SET_COLORKEY_DESTINATION; else if (dvscntr & DVS_SOURCE_KEY) key->flags = I915_SET_COLORKEY_SOURCE; 
else key->flags = I915_SET_COLORKEY_NONE; } static int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj, *old_obj; int pipe = intel_plane->pipe; int ret = 0; int x = src_x >> 16, y = src_y >> 16; int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; bool disable_primary = false; intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; old_obj = intel_plane->obj; src_w = src_w >> 16; src_h = src_h >> 16; /* Pipe must be running... */ if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) return -EINVAL; if (crtc_x >= primary_w || crtc_y >= primary_h) return -EINVAL; /* Don't modify another pipe's plane */ if (intel_plane->pipe != intel_crtc->pipe) return -EINVAL; /* * Clamp the width & height into the visible area. Note we don't * try to scale the source if part of the visible region is offscreen. * The caller must handle that by adjusting source offset and size. */ if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) { crtc_w += crtc_x; crtc_x = 0; } if ((crtc_x + crtc_w) <= 0) /* Nothing to display */ goto out; if ((crtc_x + crtc_w) > primary_w) crtc_w = primary_w - crtc_x; if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) { crtc_h += crtc_y; crtc_y = 0; } if ((crtc_y + crtc_h) <= 0) /* Nothing to display */ goto out; if (crtc_y + crtc_h > primary_h) crtc_h = primary_h - crtc_y; if (!crtc_w || !crtc_h) /* Again, nothing to display */ goto out; /* * We can take a larger source and scale it down, but * only so much... 16x is the max on SNB. */ if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale) return -EINVAL; /* * If the sprite is completely covering the primary plane, * we can disable the primary and save power. */ if ((crtc_x == 0) && (crtc_y == 0) && (crtc_w == primary_w) && (crtc_h == primary_h)) disable_primary = true; DRM_LOCK(dev); ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); if (ret) goto out_unlock; intel_plane->obj = obj; /* * Be sure to re-enable the primary before the sprite is no longer * covering it fully. */ if (!disable_primary && intel_plane->primary_disabled) { intel_enable_primary(crtc); intel_plane->primary_disabled = false; } intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y, crtc_w, crtc_h, x, y, src_w, src_h); if (disable_primary) { intel_disable_primary(crtc); intel_plane->primary_disabled = true; } /* Unpin old obj after new one is active to avoid ugliness */ if (old_obj) { /* * It's fairly common to simply update the position of * an existing object. In that case, we don't need to * wait for vblank to avoid ugliness, we only need to * do the pin & ref bookkeeping. 
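 * When the backing object does change, the vblank wait below ensures the
 * new surface is actually being scanned out before the old buffer is
 * unpinned.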
*/ if (old_obj != obj) { DRM_UNLOCK(dev); intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); DRM_LOCK(dev); } intel_unpin_fb_obj(old_obj); } out_unlock: DRM_UNLOCK(dev); out: return ret; } static int intel_disable_plane(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct intel_plane *intel_plane = to_intel_plane(plane); int ret = 0; if (intel_plane->primary_disabled) { intel_enable_primary(plane->crtc); intel_plane->primary_disabled = false; } intel_plane->disable_plane(plane); if (!intel_plane->obj) goto out; DRM_LOCK(dev); intel_unpin_fb_obj(intel_plane->obj); intel_plane->obj = NULL; DRM_UNLOCK(dev); out: return ret; } static void intel_destroy_plane(struct drm_plane *plane) { struct intel_plane *intel_plane = to_intel_plane(plane); intel_disable_plane(plane); drm_plane_cleanup(plane); free(intel_plane, DRM_MEM_KMS); } int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_sprite_colorkey *set = data; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mode_object *obj; struct drm_plane *plane; struct intel_plane *intel_plane; int ret = 0; - if (!dev_priv) - return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; /* Make sure we don't try to enable both src & dest simultaneously */ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) return -EINVAL; sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); if (!obj) { ret = -EINVAL; goto out_unlock; } plane = obj_to_plane(obj); intel_plane = to_intel_plane(plane); ret = intel_plane->update_colorkey(plane, set); out_unlock: sx_xunlock(&dev->mode_config.mutex); return ret; } int intel_sprite_get_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_sprite_colorkey *get = data; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mode_object *obj; struct drm_plane *plane; struct intel_plane *intel_plane; int ret = 0; - if (!dev_priv) - return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; sx_xlock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); if (!obj) { ret = -EINVAL; goto out_unlock; } plane = obj_to_plane(obj); intel_plane = to_intel_plane(plane); intel_plane->get_colorkey(plane, get); out_unlock: sx_xunlock(&dev->mode_config.mutex); return ret; } static const struct drm_plane_funcs intel_plane_funcs = { .update_plane = intel_update_plane, .disable_plane = intel_disable_plane, .destroy = intel_destroy_plane, }; +static uint32_t ilk_plane_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, +}; + static uint32_t snb_plane_formats[] = { DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, }; int intel_plane_init(struct drm_device *dev, enum pipe pipe) { struct intel_plane *intel_plane; unsigned long possible_crtcs; + const uint32_t *plane_formats; + int num_plane_formats; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 5) return -ENODEV; intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS, M_WAITOK | M_ZERO); - if (IS_GEN6(dev)) { + switch (INTEL_INFO(dev)->gen) { + case 5: + case 6: intel_plane->max_downscale = 16; - intel_plane->update_plane = snb_update_plane; - intel_plane->disable_plane = 
snb_disable_plane; - intel_plane->update_colorkey = snb_update_colorkey; - intel_plane->get_colorkey = snb_get_colorkey; - } else if (IS_GEN7(dev)) { + intel_plane->update_plane = ilk_update_plane; + intel_plane->disable_plane = ilk_disable_plane; + intel_plane->update_colorkey = ilk_update_colorkey; + intel_plane->get_colorkey = ilk_get_colorkey; + + if (IS_GEN6(dev)) { + plane_formats = snb_plane_formats; + num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats); + } else { + plane_formats = ilk_plane_formats; + num_plane_formats = DRM_ARRAY_SIZE(ilk_plane_formats); + } + break; + + case 7: intel_plane->max_downscale = 2; intel_plane->update_plane = ivb_update_plane; intel_plane->disable_plane = ivb_disable_plane; intel_plane->update_colorkey = ivb_update_colorkey; intel_plane->get_colorkey = ivb_get_colorkey; + + plane_formats = snb_plane_formats; + num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats); + break; + + default: + return -ENODEV; } intel_plane->pipe = pipe; possible_crtcs = (1 << pipe); ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, - &intel_plane_funcs, snb_plane_formats, - DRM_ARRAY_SIZE(snb_plane_formats), false); + &intel_plane_funcs, + plane_formats, num_plane_formats, + false); if (ret) free(intel_plane, DRM_MEM_KMS); return ret; } Index: head/sys/dev/drm2/i915/intel_tv.c =================================================================== --- head/sys/dev/drm2/i915/intel_tv.c (revision 277486) +++ head/sys/dev/drm2/i915/intel_tv.c (revision 277487) @@ -1,1609 +1,1616 @@ /* * Copyright © 2006-2008 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt * */ /** @file * Integrated TV-out support for the 915GM and 945GM. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include enum tv_margin { TV_MARGIN_LEFT, TV_MARGIN_TOP, TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM }; /** Private structure for the integrated TV support */ struct intel_tv { struct intel_encoder base; int type; const char *tv_format; int margin[4]; u32 save_TV_H_CTL_1; u32 save_TV_H_CTL_2; u32 save_TV_H_CTL_3; u32 save_TV_V_CTL_1; u32 save_TV_V_CTL_2; u32 save_TV_V_CTL_3; u32 save_TV_V_CTL_4; u32 save_TV_V_CTL_5; u32 save_TV_V_CTL_6; u32 save_TV_V_CTL_7; u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3; u32 save_TV_CSC_Y; u32 save_TV_CSC_Y2; u32 save_TV_CSC_U; u32 save_TV_CSC_U2; u32 save_TV_CSC_V; u32 save_TV_CSC_V2; u32 save_TV_CLR_KNOBS; u32 save_TV_CLR_LEVEL; u32 save_TV_WIN_POS; u32 save_TV_WIN_SIZE; u32 save_TV_FILTER_CTL_1; u32 save_TV_FILTER_CTL_2; u32 save_TV_FILTER_CTL_3; u32 save_TV_H_LUMA[60]; u32 save_TV_H_CHROMA[60]; u32 save_TV_V_LUMA[43]; u32 save_TV_V_CHROMA[43]; u32 save_TV_DAC; u32 save_TV_CTL; }; struct video_levels { int blank, black, burst; }; struct color_conversion { u16 ry, gy, by, ay; u16 ru, gu, bu, au; u16 rv, gv, bv, av; }; static const u32 filter_table[] = { 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0, 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0, 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, 0x28003100, 0x28002F00, 0x00003100, 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0, 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, 0x308031C0, 
0x2F203DC0, 0x31802900, 0x3E8030C0, 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, 0x28003100, 0x28002F00, 0x00003100, }; /* * Color conversion values have 3 separate fixed point formats: * * 10 bit fields (ay, au) * 1.9 fixed point (b.bbbbbbbbb) * 11 bit fields (ry, by, ru, gu, gv) * exp.mantissa (ee.mmmmmmmmm) * ee = 00 = 10^-1 (0.mmmmmmmmm) * ee = 01 = 10^-2 (0.0mmmmmmmmm) * ee = 10 = 10^-3 (0.00mmmmmmmmm) * ee = 11 = 10^-4 (0.000mmmmmmmmm) * 12 bit fields (gy, rv, bu) * exp.mantissa (eee.mmmmmmmmm) * eee = 000 = 10^-1 (0.mmmmmmmmm) * eee = 001 = 10^-2 (0.0mmmmmmmmm) * eee = 010 = 10^-3 (0.00mmmmmmmmm) * eee = 011 = 10^-4 (0.000mmmmmmmmm) * eee = 100 = reserved * eee = 101 = reserved * eee = 110 = reserved * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation) * * Saturation and contrast are 8 bits, with their own representation: * 8 bit field (saturation, contrast) * exp.mantissa (ee.mmmmmm) * ee = 00 = 10^-1 (0.mmmmmm) * ee = 01 = 10^0 (m.mmmmm) * ee = 10 = 10^1 (mm.mmmm) * ee = 11 = 10^2 (mmm.mmm) * * Simple conversion function: * * static u32 * float_to_csc_11(float f) * { * u32 exp; * u32 mant; * u32 ret; * * if (f < 0) * f = -f; * * if (f >= 1) { * exp = 0x7; * mant = 1 << 8; * } else { * for (exp = 0; exp < 3 && f < 0.5; exp++) * f *= 2.0; * mant = (f * (1 << 9) + 0.5); * if (mant >= (1 << 9)) * mant = (1 << 9) - 1; * } * ret = (exp << 9) | mant; * return ret; * } */ /* * Behold, magic numbers! If we plant them they might grow a big * s-video cable to the sky... or something. * * Pre-converted to appropriate hex value. */ /* * PAL & NTSC values for composite & s-video connections */ static const struct color_conversion ntsc_m_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels ntsc_m_levels_composite = { .blank = 225, .black = 267, .burst = 113, }; static const struct color_conversion ntsc_m_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels ntsc_m_levels_svideo = { .blank = 266, .black = 316, .burst = 133, }; static const struct color_conversion ntsc_j_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200, .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200, }; static const struct video_levels ntsc_j_levels_composite = { .blank = 225, .black = 225, .burst = 113, }; static const struct color_conversion ntsc_j_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200, .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200, }; static const struct video_levels ntsc_j_levels_svideo = { .blank = 266, .black = 266, .burst = 133, }; static const struct color_conversion pal_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200, .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200, }; static const struct video_levels pal_levels_composite = { .blank = 237, .black = 237, .burst = 118, }; static const struct color_conversion pal_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200, .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200, }; static const 
struct video_levels pal_levels_svideo = { .blank = 280, .black = 280, .burst = 139, }; static const struct color_conversion pal_m_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels pal_m_levels_composite = { .blank = 225, .black = 267, .burst = 113, }; static const struct color_conversion pal_m_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels pal_m_levels_svideo = { .blank = 266, .black = 316, .burst = 133, }; static const struct color_conversion pal_n_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels pal_n_levels_composite = { .blank = 225, .black = 267, .burst = 118, }; static const struct color_conversion pal_n_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels pal_n_levels_svideo = { .blank = 266, .black = 316, .burst = 139, }; /* * Component connections */ static const struct color_conversion sdtv_csc_yprpb = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200, .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200, }; static const struct color_conversion sdtv_csc_rgb = { .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, }; static const struct color_conversion hdtv_csc_yprpb = { .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145, .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200, .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200, }; static const struct color_conversion hdtv_csc_rgb = { .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, }; static const struct video_levels component_levels = { .blank = 279, .black = 279, .burst = 0, }; struct tv_mode { const char *name; int clock; int refresh; /* in millihertz (for precision) */ u32 oversample; int hsync_end, hblank_start, hblank_end, htotal; bool progressive, trilevel_sync, component_only; int vsync_start_f1, vsync_start_f2, vsync_len; bool veq_ena; int veq_start_f1, veq_start_f2, veq_len; int vi_end_f1, vi_end_f2, nbr_end; bool burst_ena; int hburst_start, hburst_len; int vburst_start_f1, vburst_end_f1; int vburst_start_f2, vburst_end_f2; int vburst_start_f3, vburst_end_f3; int vburst_start_f4, vburst_end_f4; /* * subcarrier programming */ int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc; u32 sc_reset; bool pal_burst; /* * blank/black levels */ const struct video_levels *composite_levels, *svideo_levels; const struct color_conversion *composite_color, *svideo_color; const u32 *filter_table; int max_srcw; }; /* * Sub carrier DDA * * I think this works as follows: * * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096 * * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value * * So, * dda1_ideal = subcarrier/pixel * 
4096 * dda1_inc = floor (dda1_ideal) * dda2 = dda1_ideal - dda1_inc * * then pick a ratio for dda2 that gives the closest approximation. If * you can't get close enough, you can play with dda3 as well. This * seems likely to happen when dda2 is small as the jumps would be larger * * To invert this, * * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size) * * The constants below were all computed using a 107.520MHz clock */ /** * Register programming values for TV modes. * * These values account for -1s required. */ static const struct tv_mode tv_modes[] = { { .name = "NTSC-M", .clock = 108000, .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, .vburst_start_f3 = 9, .vburst_end_f3 = 240, .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ .dda1_inc = 135, .dda2_inc = 20800, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, .pal_burst = false, .composite_levels = &ntsc_m_levels_composite, .composite_color = &ntsc_m_csc_composite, .svideo_levels = &ntsc_m_levels_svideo, .svideo_color = &ntsc_m_csc_svideo, .filter_table = filter_table, }, { .name = "NTSC-443", .clock = 108000, .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, .vburst_start_f3 = 9, .vburst_end_f3 = 240, .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 168, .dda2_inc = 4093, .dda2_size = 27456, .dda3_inc = 310, .dda3_size = 525, .sc_reset = TV_SC_RESET_NEVER, .pal_burst = false, .composite_levels = &ntsc_m_levels_composite, .composite_color = &ntsc_m_csc_composite, .svideo_levels = &ntsc_m_levels_svideo, .svideo_color = &ntsc_m_csc_svideo, .filter_table = filter_table, }, { .name = "NTSC-J", .clock = 108000, .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, .vburst_start_f3 = 9, .vburst_end_f3 = 240, .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ .dda1_inc = 135, 
.dda2_inc = 20800, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, .pal_burst = false, .composite_levels = &ntsc_j_levels_composite, .composite_color = &ntsc_j_csc_composite, .svideo_levels = &ntsc_j_levels_svideo, .svideo_color = &ntsc_j_csc_svideo, .filter_table = filter_table, }, { .name = "PAL-M", .clock = 108000, .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, .vburst_start_f3 = 9, .vburst_end_f3 = 240, .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ .dda1_inc = 135, .dda2_inc = 16704, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, .composite_levels = &pal_m_levels_composite, .composite_color = &pal_m_csc_composite, .svideo_levels = &pal_m_levels_svideo, .svideo_color = &pal_m_csc_svideo, .filter_table = filter_table, }, { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL-N", .clock = 108000, .refresh = 50000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, .hsync_end = 64, .hblank_end = 128, .hblank_start = 844, .htotal = 863, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 24, .vi_end_f2 = 25, .nbr_end = 286, .burst_ena = true, .hburst_start = 73, .hburst_len = 34, .vburst_start_f1 = 8, .vburst_end_f1 = 285, .vburst_start_f2 = 8, .vburst_end_f2 = 286, .vburst_start_f3 = 9, .vburst_end_f3 = 286, .vburst_start_f4 = 9, .vburst_end_f4 = 285, /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 135, .dda2_inc = 23578, .dda2_size = 27648, .dda3_inc = 134, .dda3_size = 625, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, .composite_levels = &pal_n_levels_composite, .composite_color = &pal_n_csc_composite, .svideo_levels = &pal_n_levels_svideo, .svideo_color = &pal_n_csc_svideo, .filter_table = filter_table, }, { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL", .clock = 108000, .refresh = 50000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, .hsync_end = 64, .hblank_end = 142, .hblank_start = 844, .htotal = 863, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 5, .vsync_start_f2 = 6, .vsync_len = 5, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 15, .vi_end_f1 = 24, .vi_end_f2 = 25, .nbr_end = 286, .burst_ena = true, .hburst_start = 73, .hburst_len = 32, .vburst_start_f1 = 8, .vburst_end_f1 = 285, .vburst_start_f2 = 8, .vburst_end_f2 = 286, .vburst_start_f3 = 9, .vburst_end_f3 = 286, .vburst_start_f4 = 9, .vburst_end_f4 = 285, /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 168, .dda2_inc = 4122, .dda2_size = 27648, .dda3_inc = 67, .dda3_size = 625, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, .composite_levels = &pal_levels_composite, .composite_color = &pal_csc_composite, .svideo_levels = &pal_levels_svideo, .svideo_color = &pal_csc_svideo, 
.filter_table = filter_table, }, { .name = "720p@60Hz", .clock = 148800, .refresh = 60000, .oversample = TV_OVERSAMPLE_2X, .component_only = 1, .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1649, .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, .veq_ena = false, .vi_end_f1 = 29, .vi_end_f2 = 29, .nbr_end = 719, .burst_ena = false, .filter_table = filter_table, }, { .name = "720p@50Hz", .clock = 148800, .refresh = 50000, .oversample = TV_OVERSAMPLE_2X, .component_only = 1, .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1979, .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, .veq_ena = false, .vi_end_f1 = 29, .vi_end_f2 = 29, .nbr_end = 719, .burst_ena = false, .filter_table = filter_table, .max_srcw = 800 }, { .name = "1080i@50Hz", .clock = 148800, .refresh = 50000, .oversample = TV_OVERSAMPLE_2X, .component_only = 1, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2639, .progressive = false, .trilevel_sync = true, .vsync_start_f1 = 4, .vsync_start_f2 = 5, .vsync_len = 10, .veq_ena = true, .veq_start_f1 = 4, .veq_start_f2 = 4, .veq_len = 10, .vi_end_f1 = 21, .vi_end_f2 = 22, .nbr_end = 539, .burst_ena = false, .filter_table = filter_table, }, { .name = "1080i@60Hz", .clock = 148800, .refresh = 60000, .oversample = TV_OVERSAMPLE_2X, .component_only = 1, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2199, .progressive = false, .trilevel_sync = true, .vsync_start_f1 = 4, .vsync_start_f2 = 5, .vsync_len = 10, .veq_ena = true, .veq_start_f1 = 4, .veq_start_f2 = 4, .veq_len = 10, .vi_end_f1 = 21, .vi_end_f2 = 22, .nbr_end = 539, .burst_ena = false, .filter_table = filter_table, }, }; static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) { return container_of(encoder, struct intel_tv, base.base); } static struct intel_tv *intel_attached_tv(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_tv, base); } static void intel_tv_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; switch (mode) { case DRM_MODE_DPMS_ON: I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); break; } } static const struct tv_mode * intel_tv_mode_lookup(const char *tv_format) { int i; - for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) { + for (i = 0; i < DRM_ARRAY_SIZE(tv_modes); i++) { const struct tv_mode *tv_mode = &tv_modes[i]; if (!strcmp(tv_format, tv_mode->name)) return tv_mode; } return NULL; } static const struct tv_mode * intel_tv_mode_find(struct intel_tv *intel_tv) { return intel_tv_mode_lookup(intel_tv->tv_format); } static enum drm_mode_status intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); /* Ensure TV refresh is close to desired refresh */ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) < 1000) return MODE_OK; return MODE_CLOCK_RANGE; } static bool -intel_tv_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, +intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode 
*adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_mode_config *drm_config = &dev->mode_config; struct intel_tv *intel_tv = enc_to_intel_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); struct drm_encoder *other_encoder; if (!tv_mode) return false; /* FIXME: lock encoder list */ list_for_each_entry(other_encoder, &drm_config->encoder_list, head) { if (other_encoder != encoder && other_encoder->crtc == encoder->crtc) return false; } adjusted_mode->clock = tv_mode->clock; return true; } static void intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_tv *intel_tv = enc_to_intel_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); u32 tv_ctl; u32 hctl1, hctl2, hctl3; u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; u32 scctl1, scctl2, scctl3; int i, j; const struct video_levels *video_levels; const struct color_conversion *color_conversion; bool burst_ena; int pipe = intel_crtc->pipe; if (!tv_mode) return; /* can't happen (mode_prepare prevents this) */ tv_ctl = I915_READ(TV_CTL); tv_ctl &= TV_CTL_SAVE; switch (intel_tv->type) { default: case DRM_MODE_CONNECTOR_Unknown: case DRM_MODE_CONNECTOR_Composite: tv_ctl |= TV_ENC_OUTPUT_COMPOSITE; video_levels = tv_mode->composite_levels; color_conversion = tv_mode->composite_color; burst_ena = tv_mode->burst_ena; break; case DRM_MODE_CONNECTOR_Component: tv_ctl |= TV_ENC_OUTPUT_COMPONENT; video_levels = &component_levels; if (tv_mode->burst_ena) color_conversion = &sdtv_csc_yprpb; else color_conversion = &hdtv_csc_yprpb; burst_ena = false; break; case DRM_MODE_CONNECTOR_SVIDEO: tv_ctl |= TV_ENC_OUTPUT_SVIDEO; video_levels = tv_mode->svideo_levels; color_conversion = tv_mode->svideo_color; burst_ena = tv_mode->burst_ena; break; } hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) | (tv_mode->htotal << TV_HTOTAL_SHIFT); hctl2 = (tv_mode->hburst_start << 16) | (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT); if (burst_ena) hctl2 |= TV_BURST_ENA; hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) | (tv_mode->hblank_end << TV_HBLANK_END_SHIFT); vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) | (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) | (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT); vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) | (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) | (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT); vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) | (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) | (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT); if (tv_mode->veq_ena) vctl3 |= TV_EQUAL_ENA; vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) | (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT); vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) | (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT); vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) | (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT); vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) | (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT); if (intel_crtc->pipe == 1) tv_ctl |= TV_ENC_PIPEB_SELECT; tv_ctl |= tv_mode->oversample; if (tv_mode->progressive) tv_ctl |= TV_PROGRESSIVE; if (tv_mode->trilevel_sync) tv_ctl |= TV_TRILEVEL_SYNC; if (tv_mode->pal_burst) tv_ctl |= 
TV_PAL_BURST; scctl1 = 0; if (tv_mode->dda1_inc) scctl1 |= TV_SC_DDA1_EN; if (tv_mode->dda2_inc) scctl1 |= TV_SC_DDA2_EN; if (tv_mode->dda3_inc) scctl1 |= TV_SC_DDA3_EN; scctl1 |= tv_mode->sc_reset; if (video_levels) scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT; scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT | tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; /* Enable two fixes for the chips that need them. */ if (dev->pci_device < 0x2772) tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; I915_WRITE(TV_H_CTL_1, hctl1); I915_WRITE(TV_H_CTL_2, hctl2); I915_WRITE(TV_H_CTL_3, hctl3); I915_WRITE(TV_V_CTL_1, vctl1); I915_WRITE(TV_V_CTL_2, vctl2); I915_WRITE(TV_V_CTL_3, vctl3); I915_WRITE(TV_V_CTL_4, vctl4); I915_WRITE(TV_V_CTL_5, vctl5); I915_WRITE(TV_V_CTL_6, vctl6); I915_WRITE(TV_V_CTL_7, vctl7); I915_WRITE(TV_SC_CTL_1, scctl1); I915_WRITE(TV_SC_CTL_2, scctl2); I915_WRITE(TV_SC_CTL_3, scctl3); if (color_conversion) { I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) | color_conversion->gy); I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) | color_conversion->ay); I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) | color_conversion->gu); I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) | color_conversion->au); I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) | color_conversion->gv); I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) | color_conversion->av); } if (INTEL_INFO(dev)->gen >= 4) I915_WRITE(TV_CLR_KNOBS, 0x00404000); else I915_WRITE(TV_CLR_KNOBS, 0x00606000); if (video_levels) I915_WRITE(TV_CLR_LEVEL, ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); { int pipeconf_reg = PIPECONF(pipe); int dspcntr_reg = DSPCNTR(intel_crtc->plane); int pipeconf = I915_READ(pipeconf_reg); int dspcntr = I915_READ(dspcntr_reg); int dspbase_reg = DSPADDR(intel_crtc->plane); int xpos = 0x0, ypos = 0x0; unsigned int xsize, ysize; /* Pipe must be off here */ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); /* Wait for vblank for the disable to take effect */ if (IS_GEN2(dev)) intel_wait_for_vblank(dev, intel_crtc->pipe); I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE); /* Wait for vblank for the disable to take effect. 
*/ intel_wait_for_pipe_off(dev, intel_crtc->pipe); /* Filter ctl must be set before TV_WIN_SIZE */ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); xsize = tv_mode->hblank_start - tv_mode->hblank_end; if (tv_mode->progressive) ysize = tv_mode->nbr_end + 1; else ysize = 2*tv_mode->nbr_end + 1; xpos += intel_tv->margin[TV_MARGIN_LEFT]; ypos += intel_tv->margin[TV_MARGIN_TOP]; xsize -= (intel_tv->margin[TV_MARGIN_LEFT] + intel_tv->margin[TV_MARGIN_RIGHT]); ysize -= (intel_tv->margin[TV_MARGIN_TOP] + intel_tv->margin[TV_MARGIN_BOTTOM]); I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); I915_WRITE(pipeconf_reg, pipeconf); I915_WRITE(dspcntr_reg, dspcntr); /* Flush the plane changes */ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); } j = 0; for (i = 0; i < 60; i++) I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); for (i = 0; i < 60; i++) I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE); I915_WRITE(TV_CTL, tv_ctl); } static const struct drm_display_mode reported_modes[] = { { .name = "NTSC 480i", .clock = 107520, .hdisplay = 1280, .hsync_start = 1368, .hsync_end = 1496, .htotal = 1712, .vdisplay = 1024, .vsync_start = 1027, .vsync_end = 1034, .vtotal = 1104, .type = DRM_MODE_TYPE_DRIVER, }, }; /** * Detects TV presence by checking for load. * * Requires that the current pipe's DPLL is active. * \return true if TV is connected. * \return false if TV is disconnected. */ static int intel_tv_detect_type(struct intel_tv *intel_tv, struct drm_connector *connector) { struct drm_encoder *encoder = &intel_tv->base.base; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 tv_ctl, save_tv_ctl; u32 tv_dac, save_tv_dac; int type; /* Disable TV interrupts around load detect or we'll recurse */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { mtx_lock(&dev_priv->irq_lock); i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); mtx_unlock(&dev_priv->irq_lock); } save_tv_dac = tv_dac = I915_READ(TV_DAC); save_tv_ctl = tv_ctl = I915_READ(TV_CTL); /* Poll for TV detection */ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; if (intel_crtc->pipe == 1) tv_ctl |= TV_ENC_PIPEB_SELECT; else tv_ctl &= ~TV_ENC_PIPEB_SELECT; tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); tv_dac |= (TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL | DAC_CTL_OVERRIDE | DAC_A_0_7_V | DAC_B_0_7_V | DAC_C_0_7_V); + + /* + * The TV sense state should be cleared to zero on cantiga platform. Otherwise + * the TV is misdetected. This is hardware requirement. 
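+ * (Cantiga is the codename for the GM45 chipset, hence the IS_GM45() check below.)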
+ */ + if (IS_GM45(dev)) + tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | + TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); + I915_WRITE(TV_CTL, tv_ctl); I915_WRITE(TV_DAC, tv_dac); POSTING_READ(TV_DAC); intel_wait_for_vblank(intel_tv->base.base.dev, to_intel_crtc(intel_tv->base.base.crtc)->pipe); type = -1; tv_dac = I915_READ(TV_DAC); DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); /* * A B C * 0 1 1 Composite * 1 0 X svideo * 0 0 0 Component */ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { DRM_DEBUG_KMS("Detected Composite TV connection\n"); type = DRM_MODE_CONNECTOR_Composite; } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { DRM_DEBUG_KMS("Detected S-Video TV connection\n"); type = DRM_MODE_CONNECTOR_SVIDEO; } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { DRM_DEBUG_KMS("Detected Component TV connection\n"); type = DRM_MODE_CONNECTOR_Component; } else { DRM_DEBUG_KMS("Unrecognised TV connection\n"); type = -1; } I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); I915_WRITE(TV_CTL, save_tv_ctl); /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { mtx_lock(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); mtx_unlock(&dev_priv->irq_lock); } return type; } /* * Here we set accurate tv format according to connector type * i.e Component TV should not be assigned by NTSC or PAL */ static void intel_tv_find_better_format(struct drm_connector *connector) { struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); int i; if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == tv_mode->component_only) return; for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) { tv_mode = tv_modes + i; if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == tv_mode->component_only) break; } intel_tv->tv_format = tv_mode->name; drm_connector_property_set_value(connector, connector->dev->mode_config.tv_mode_property, i); } /** * Detect the TV connection. * * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure * we have a pipe programmed in order to probe the TV. 
*/ static enum drm_connector_status intel_tv_detect(struct drm_connector *connector, bool force) { struct drm_display_mode mode; struct intel_tv *intel_tv = intel_attached_tv(connector); int type; mode = reported_modes[0]; drm_mode_set_crtcinfo(&mode, 0); - if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { - type = intel_tv_detect_type(intel_tv, connector); - } else if (force) { + if (force) { struct intel_load_detect_pipe tmp; if (intel_get_load_detect_pipe(&intel_tv->base, connector, &mode, &tmp)) { type = intel_tv_detect_type(intel_tv, connector); intel_release_load_detect_pipe(&intel_tv->base, connector, &tmp); } else return connector_status_unknown; } else return connector->status; if (type < 0) return connector_status_disconnected; intel_tv->type = type; intel_tv_find_better_format(connector); return connector_status_connected; } static const struct input_res { const char *name; int w, h; } input_res_table[] = { {"640x480", 640, 480}, {"800x600", 800, 600}, {"1024x768", 1024, 768}, {"1280x1024", 1280, 1024}, {"848x480", 848, 480}, {"1280x720", 1280, 720}, {"1920x1080", 1920, 1080}, }; /* * Chose preferred mode according to line number of TV format */ static void intel_tv_chose_preferred_modes(struct drm_connector *connector, struct drm_display_mode *mode_ptr) { struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; else if (tv_mode->nbr_end > 480) { if (tv_mode->progressive == true && tv_mode->nbr_end < 720) { if (mode_ptr->vdisplay == 720) mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; } else if (mode_ptr->vdisplay == 1080) mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; } } /** * Stub get_modes function. * * This should probably return a set of fixed modes, unless we can figure out * how to probe modes off of TV connections. 
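 *
 * As an illustrative check (not part of the original comment): for the
 * 640x480 entry below, htotal becomes 640 + 96 = 736 and vtotal becomes
 * 480 + 33 = 513, so with the NTSC-M refresh of 59940 millihertz the
 * computed mode clock is 59940 * 513 * 736 / 1000000, roughly 22631 kHz.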
*/ static int intel_tv_get_modes(struct drm_connector *connector) { struct drm_display_mode *mode_ptr; struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); int j, count = 0; u64 tmp; for (j = 0; j < DRM_ARRAY_SIZE(input_res_table); j++) { const struct input_res *input = &input_res_table[j]; unsigned int hactive_s = input->w; unsigned int vactive_s = input->h; if (tv_mode->max_srcw && input->w > tv_mode->max_srcw) continue; if (input->w > 1024 && (!tv_mode->progressive && !tv_mode->component_only)) continue; mode_ptr = drm_mode_create(connector->dev); if (!mode_ptr) continue; strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); mode_ptr->hdisplay = hactive_s; mode_ptr->hsync_start = hactive_s + 1; mode_ptr->hsync_end = hactive_s + 64; if (mode_ptr->hsync_end <= mode_ptr->hsync_start) mode_ptr->hsync_end = mode_ptr->hsync_start + 1; mode_ptr->htotal = hactive_s + 96; mode_ptr->vdisplay = vactive_s; mode_ptr->vsync_start = vactive_s + 1; mode_ptr->vsync_end = vactive_s + 32; if (mode_ptr->vsync_end <= mode_ptr->vsync_start) mode_ptr->vsync_end = mode_ptr->vsync_start + 1; mode_ptr->vtotal = vactive_s + 33; tmp = (u64) tv_mode->refresh * mode_ptr->vtotal; tmp *= mode_ptr->htotal; tmp = tmp / 1000000; mode_ptr->clock = (int) tmp; mode_ptr->type = DRM_MODE_TYPE_DRIVER; intel_tv_chose_preferred_modes(connector, mode_ptr); drm_mode_probed_add(connector, mode_ptr); count++; } return count; } static void intel_tv_destroy(struct drm_connector *connector) { #if 0 drm_sysfs_connector_remove(connector); #endif drm_connector_cleanup(connector); free(connector, DRM_MEM_KMS); } static int intel_tv_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct drm_device *dev = connector->dev; struct intel_tv *intel_tv = intel_attached_tv(connector); struct drm_crtc *crtc = intel_tv->base.base.crtc; int ret = 0; bool changed = false; ret = drm_connector_property_set_value(connector, property, val); if (ret < 0) goto out; if (property == dev->mode_config.tv_left_margin_property && intel_tv->margin[TV_MARGIN_LEFT] != val) { intel_tv->margin[TV_MARGIN_LEFT] = val; changed = true; } else if (property == dev->mode_config.tv_right_margin_property && intel_tv->margin[TV_MARGIN_RIGHT] != val) { intel_tv->margin[TV_MARGIN_RIGHT] = val; changed = true; } else if (property == dev->mode_config.tv_top_margin_property && intel_tv->margin[TV_MARGIN_TOP] != val) { intel_tv->margin[TV_MARGIN_TOP] = val; changed = true; } else if (property == dev->mode_config.tv_bottom_margin_property && intel_tv->margin[TV_MARGIN_BOTTOM] != val) { intel_tv->margin[TV_MARGIN_BOTTOM] = val; changed = true; } else if (property == dev->mode_config.tv_mode_property) { if (val >= DRM_ARRAY_SIZE(tv_modes)) { ret = -EINVAL; goto out; } if (!strcmp(intel_tv->tv_format, tv_modes[val].name)) goto out; intel_tv->tv_format = tv_modes[val].name; changed = true; } else { ret = -EINVAL; goto out; } if (changed && crtc) drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); out: return ret; } static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { .dpms = intel_tv_dpms, .mode_fixup = intel_tv_mode_fixup, .prepare = intel_encoder_prepare, .mode_set = intel_tv_mode_set, .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_tv_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_tv_detect, .destroy = intel_tv_destroy, .set_property = intel_tv_set_property, .fill_modes = 
drm_helper_probe_single_connector_modes, }; static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { .mode_valid = intel_tv_mode_valid, .get_modes = intel_tv_get_modes, .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_tv_enc_funcs = { .destroy = intel_encoder_destroy, }; /* * Enumerate the child dev array parsed from VBT to check whether * the integrated TV is present. * If it is present, return 1. * If it is not present, return false. * If no child dev is parsed from VBT, it assumes that the TV is present. */ static int tv_is_present_in_vbt(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct child_device_config *p_child; int i, ret; if (!dev_priv->child_dev_num) return 1; ret = 0; for (i = 0; i < dev_priv->child_dev_num; i++) { p_child = dev_priv->child_dev + i; /* * If the device type is not TV, continue. */ if (p_child->device_type != DEVICE_TYPE_INT_TV && p_child->device_type != DEVICE_TYPE_TV) continue; /* Only when the addin_offset is non-zero, it is regarded * as present. */ if (p_child->addin_offset) { ret = 1; break; } } return ret; } void intel_tv_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; struct intel_tv *intel_tv; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; u32 tv_dac_on, tv_dac_off, save_tv_dac; char *tv_format_names[DRM_ARRAY_SIZE(tv_modes)]; int i, initial_mode = 0; if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) return; if (!tv_is_present_in_vbt(dev)) { DRM_DEBUG_KMS("Integrated TV is not present.\n"); return; } /* Even if we have an encoder we may not have a connector */ if (!dev_priv->int_tv_support) return; /* * Sanity check the TV output by checking to see if the * DAC register holds a value */ save_tv_dac = I915_READ(TV_DAC); I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); tv_dac_on = I915_READ(TV_DAC); I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); tv_dac_off = I915_READ(TV_DAC); I915_WRITE(TV_DAC, save_tv_dac); /* * If the register does not hold the state change enable * bit, (either as a 0 or a 1), assume it doesn't really * exist */ if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 || (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) return; intel_tv = malloc(sizeof(struct intel_tv), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO); intel_encoder = &intel_tv->base; connector = &intel_connector->base; /* The documentation, for the older chipsets at least, recommend * using a polling method rather than hotplug detection for TVs. * This is because in order to perform the hotplug detection, the PLLs * for the TV must be kept alive increasing power drain and starving * bandwidth from other encoders. Notably for instance, it causes * pipe underruns on Crestline when this encoder is supposedly idle. * * More recent chipsets favour HDMI rather than integrated S-Video. 
*/ connector->polled = DRM_CONNECTOR_POLL_CONNECT; drm_connector_init(dev, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC); intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); intel_tv->type = DRM_MODE_CONNECTOR_Unknown; /* BIOS margin values */ intel_tv->margin[TV_MARGIN_LEFT] = 54; intel_tv->margin[TV_MARGIN_TOP] = 36; intel_tv->margin[TV_MARGIN_RIGHT] = 46; intel_tv->margin[TV_MARGIN_BOTTOM] = 37; intel_tv->tv_format = tv_modes[initial_mode].name; drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); connector->interlace_allowed = false; connector->doublescan_allowed = false; /* Create TV properties then attach current values */ for (i = 0; i < DRM_ARRAY_SIZE(tv_modes); i++) tv_format_names[i] = __DECONST(char *, tv_modes[i].name); drm_mode_create_tv_properties(dev, DRM_ARRAY_SIZE(tv_modes), tv_format_names); drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, initial_mode); drm_connector_attach_property(connector, dev->mode_config.tv_left_margin_property, intel_tv->margin[TV_MARGIN_LEFT]); drm_connector_attach_property(connector, dev->mode_config.tv_top_margin_property, intel_tv->margin[TV_MARGIN_TOP]); drm_connector_attach_property(connector, dev->mode_config.tv_right_margin_property, intel_tv->margin[TV_MARGIN_RIGHT]); drm_connector_attach_property(connector, dev->mode_config.tv_bottom_margin_property, intel_tv->margin[TV_MARGIN_BOTTOM]); #if 0 drm_sysfs_connector_add(connector); #endif } Index: head/sys/dev/drm2/radeon/atombios_encoders.c =================================================================== --- head/sys/dev/drm2/radeon/atombios_encoders.c (revision 277486) +++ head/sys/dev/drm2/radeon/atombios_encoders.c (revision 277487) @@ -1,2660 +1,2660 @@ /* * Copyright 2007-11 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "radeon.h" #include "radeon_asic.h" /* Declares several prototypes; clang is pleased. 
*/ #include "atom.h" static u8 radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev) { u8 backlight_level; u32 bios_2_scratch; if (rdev->family >= CHIP_R600) bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH); else bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH); backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >> ATOM_S2_CURRENT_BL_LEVEL_SHIFT); return backlight_level; } static void radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev, u8 backlight_level) { u32 bios_2_scratch; if (rdev->family >= CHIP_R600) bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH); else bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH); bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) & ATOM_S2_CURRENT_BL_LEVEL_MASK); if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); else WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch); } u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return 0; return radeon_atom_get_backlight_level_from_reg(rdev); } void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) { struct drm_encoder *encoder = &radeon_encoder->base; struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_atom_dig *dig; DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; int index; if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return; if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) && radeon_encoder->enc_priv) { dig = radeon_encoder->enc_priv; dig->backlight_level = level; radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); if (dig->backlight_level == 0) { args.ucAction = ATOM_LCD_BLOFF; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } else { args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); args.ucAction = ATOM_LCD_BLON; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->backlight_level == 0) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); else { atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); } break; default: break; } } } #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) static u8 radeon_atom_bl_level(struct backlight_device *bd) { u8 level; /* Convert brightness to hardware level */ if (bd->props.brightness < 0) level = 0; else if (bd->props.brightness > RADEON_MAX_BL_LEVEL) level = RADEON_MAX_BL_LEVEL; else level = bd->props.brightness; return level; } static int radeon_atom_backlight_update_status(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); struct radeon_encoder *radeon_encoder = pdata->encoder; 
atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd)); return 0; } static int radeon_atom_backlight_get_brightness(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); struct radeon_encoder *radeon_encoder = pdata->encoder; struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; return radeon_atom_get_backlight_level_from_reg(rdev); } static const struct backlight_ops radeon_atom_backlight_ops = { .get_brightness = radeon_atom_backlight_get_brightness, .update_status = radeon_atom_backlight_update_status, }; void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct backlight_device *bd; struct backlight_properties props; struct radeon_backlight_privdata *pdata; struct radeon_encoder_atom_dig *dig; u8 backlight_level; char bl_name[16]; if (!radeon_encoder->enc_priv) return; if (!rdev->is_atom_bios) return; if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return; pdata = malloc(sizeof(struct radeon_backlight_privdata), DRM_MEM_DRIVER, M_WAITOK); if (!pdata) { DRM_ERROR("Memory allocation failed\n"); goto error; } memset(&props, 0, sizeof(props)); props.max_brightness = RADEON_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "radeon_bl%d", dev->primary->index); bd = backlight_device_register(bl_name, &drm_connector->kdev, pdata, &radeon_atom_backlight_ops, &props); if (IS_ERR(bd)) { DRM_ERROR("Backlight registration failed\n"); goto error; } pdata->encoder = radeon_encoder; backlight_level = radeon_atom_get_backlight_level_from_reg(rdev); dig = radeon_encoder->enc_priv; dig->bl_dev = bd; bd->props.brightness = radeon_atom_backlight_get_brightness(bd); bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); DRM_INFO("radeon atom DIG backlight initialized\n"); return; error: free(pdata, DRM_MEM_DRIVER); return; } static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct backlight_device *bd = NULL; struct radeon_encoder_atom_dig *dig; if (!radeon_encoder->enc_priv) return; if (!rdev->is_atom_bios) return; if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return; dig = radeon_encoder->enc_priv; bd = dig->bl_dev; dig->bl_dev = NULL; if (bd) { struct radeon_legacy_backlight_privdata *pdata; pdata = bl_get_data(bd); backlight_device_unregister(bd); free(pdata, DRM_MEM_DRIVER); DRM_INFO("radeon atom LVDS backlight unloaded\n"); } } #else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector) { } static void radeon_atom_backlight_exit(struct radeon_encoder *encoder) { } #endif static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case 
ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: return true; default: return false; } } static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, + struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; /* set the active encoder to connector routing */ radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); /* hw bug */ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; /* get the native mode for LVDS */ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) radeon_panel_mode_fixup(encoder, adjusted_mode); /* get the native mode for TV */ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; if (tv_dac) { if (tv_dac->tv_std == TV_STD_NTSC || tv_dac->tv_std == TV_STD_NTSC_J || tv_dac->tv_std == TV_STD_PAL_M) radeon_atom_get_tv_timings(rdev, 0, adjusted_mode); else radeon_atom_get_tv_timings(rdev, 1, adjusted_mode); } } if (ASIC_IS_DCE3(rdev) && ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); radeon_dp_set_link_config(connector, adjusted_mode); } return true; } static void atombios_dac_setup(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); DAC_ENCODER_CONTROL_PS_ALLOCATION args; int index = 0; struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; memset(&args, 0, sizeof(args)); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); break; } args.ucAction = action; if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT)) args.ucDacStandard = ATOM_DAC1_PS2; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.ucDacStandard = ATOM_DAC1_CV; else { switch (dac_info->tv_std) { case TV_STD_PAL: case TV_STD_PAL_M: case TV_STD_SCART_PAL: case TV_STD_SECAM: case TV_STD_PAL_CN: args.ucDacStandard = ATOM_DAC1_PAL; break; case TV_STD_NTSC: case TV_STD_NTSC_J: case TV_STD_PAL_60: default: args.ucDacStandard = ATOM_DAC1_NTSC; break; } } args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_tv_setup(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); TV_ENCODER_CONTROL_PS_ALLOCATION args; int index = 0; struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; memset(&args, 0, sizeof(args)); index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); args.sTVEncoder.ucAction = action; if (radeon_encoder->active_device & 
(ATOM_DEVICE_CV_SUPPORT)) args.sTVEncoder.ucTvStandard = ATOM_TV_CV; else { switch (dac_info->tv_std) { case TV_STD_NTSC: args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; break; case TV_STD_PAL: args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; break; case TV_STD_PAL_M: args.sTVEncoder.ucTvStandard = ATOM_TV_PALM; break; case TV_STD_PAL_60: args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60; break; case TV_STD_NTSC_J: args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ; break; case TV_STD_SCART_PAL: args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */ break; case TV_STD_SECAM: args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM; break; case TV_STD_PAL_CN: args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN; break; default: args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; break; } } args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); int bpc = 8; if (connector) bpc = radeon_get_monitor_bpc(connector); switch (bpc) { case 0: return PANEL_BPC_UNDEFINE; case 6: return PANEL_6BIT_PER_COLOR; case 8: default: return PANEL_8BIT_PER_COLOR; case 10: return PANEL_10BIT_PER_COLOR; case 12: return PANEL_12BIT_PER_COLOR; case 16: return PANEL_16BIT_PER_COLOR; } } union dvo_encoder_control { ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; }; void atombios_dvo_setup(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); union dvo_encoder_control args; int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); uint8_t frev, crev; memset(&args, 0, sizeof(args)); if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; /* some R4xx chips have the wrong frev */ if (rdev->family <= CHIP_RV410) frev = 1; switch (frev) { case 1: switch (crev) { case 1: /* R4xx, R5xx */ args.ext_tmds.sXTmdsEncoder.ucEnable = action; if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; break; case 2: /* RS600/690/740 */ args.dvo.sDVOEncoder.ucAction = action; args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); /* DFP1, CRT1, TV1 depending on the type of port */ args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; break; case 3: /* R6xx */ args.dvo_v3.ucAction = action; args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.dvo_v3.ucDVOConfig = 0; /* XXX */ break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; } break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } union lvds_encoder_control { LVDS_ENCODER_CONTROL_PS_ALLOCATION v1; LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; }; void atombios_digital_setup(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = 
to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; union lvds_encoder_control args; int index = 0; int hdmi_detected = 0; uint8_t frev, crev; if (!dig) return; if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) hdmi_detected = 1; memset(&args, 0, sizeof(args)); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); break; case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl); break; case ENCODER_OBJECT_ID_INTERNAL_LVTM1: if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); else index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl); break; } if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; switch (frev) { case 1: case 2: switch (crev) { case 1: args.v1.ucMisc = 0; args.v1.ucAction = action; if (hdmi_detected) args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; } else { if (dig->linkb) args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; /*if (pScrn->rgbBits == 8) */ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; } break; case 2: case 3: args.v2.ucMisc = 0; args.v2.ucAction = action; if (crev == 3) { if (dig->coherent_mode) args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; } if (hdmi_detected) args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v2.ucTruncate = 0; args.v2.ucSpatial = 0; args.v2.ucTemporal = 0; args.v2.ucFRC = 0; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; } if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; } } else { if (dig->linkb) args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; } break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; } break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } int atombios_get_encoder_mode(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_connector *connector; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *dig_connector; /* dp bridges are always DP */ if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) return 
ATOM_ENCODER_MODE_DP; /* DVO is always DVO */ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) || (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) return ATOM_ENCODER_MODE_DVO; connector = radeon_get_connector_for_encoder(encoder); /* if we don't have an active device yet, just use one of * the connectors tied to the encoder. */ if (!connector) connector = radeon_get_connector_for_encoder_init(encoder); radeon_connector = to_radeon_connector(connector); switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ if (drm_detect_hdmi_monitor(radeon_connector->edid) && radeon_audio) return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) return ATOM_ENCODER_MODE_DVI; else return ATOM_ENCODER_MODE_CRT; break; case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: default: if (drm_detect_hdmi_monitor(radeon_connector->edid) && radeon_audio) return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; break; case DRM_MODE_CONNECTOR_LVDS: return ATOM_ENCODER_MODE_LVDS; break; case DRM_MODE_CONNECTOR_DisplayPort: dig_connector = radeon_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return ATOM_ENCODER_MODE_DP; else if (drm_detect_hdmi_monitor(radeon_connector->edid) && radeon_audio) return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; break; case DRM_MODE_CONNECTOR_eDP: return ATOM_ENCODER_MODE_DP; case DRM_MODE_CONNECTOR_DVIA: case DRM_MODE_CONNECTOR_VGA: return ATOM_ENCODER_MODE_CRT; break; case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_9PinDIN: /* fix me */ return ATOM_ENCODER_MODE_TV; /*return ATOM_ENCODER_MODE_CV;*/ break; } } /* * DIG Encoder/Transmitter Setup * * DCE 3.0/3.1 * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA. * Supports up to 3 digital outputs * - 2 DIG encoder blocks. * DIG1 can drive UNIPHY link A or link B * DIG2 can drive UNIPHY link B or LVTMA * * DCE 3.2 * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B). * Supports up to 5 digital outputs * - 2 DIG encoder blocks. * DIG1/2 can drive UNIPHY0/1/2 link A or link B * * DCE 4.0/5.0/6.0 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). * Supports up to 6 digital outputs * - 6 DIG encoder blocks. * - DIG to PHY mapping is hardcoded * DIG1 drives UNIPHY0 link A, A+B * DIG2 drives UNIPHY0 link B * DIG3 drives UNIPHY1 link A, A+B * DIG4 drives UNIPHY1 link B * DIG5 drives UNIPHY2 link A, A+B * DIG6 drives UNIPHY2 link B * * DCE 4.1 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). * Supports up to 6 digital outputs * - 2 DIG encoder blocks. 
* llano * DIG1/2 can drive UNIPHY0/1/2 link A or link B * ontario * DIG1 drives UNIPHY0/1/2 link A * DIG2 drives UNIPHY0/1/2 link B * * Routing * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) * Examples: * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI * crtc1 -> dig1 -> UNIPHY0 link B -> DP * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI */ union dig_encoder_control { DIG_ENCODER_CONTROL_PS_ALLOCATION v1; DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; }; void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); union dig_encoder_control args; int index = 0; uint8_t frev, crev; int dp_clock = 0; int dp_lane_count = 0; int hpd_id = RADEON_HPD_NONE; if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; dp_clock = dig_connector->dp_clock; dp_lane_count = dig_connector->dp_lane_count; hpd_id = radeon_connector->hpd.hpd; } /* no dig encoder assigned */ if (dig->dig_encoder == -1) return; memset(&args, 0, sizeof(args)); if (ASIC_IS_DCE4(rdev)) index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl); else { if (dig->dig_encoder) index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); else index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); } if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; switch (frev) { case 1: switch (crev) { case 1: args.v1.ucAction = action; args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) args.v3.ucPanelMode = panel_mode; else args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) args.v1.ucLaneNum = dp_lane_count; else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.ucLaneNum = 8; else args.v1.ucLaneNum = 4; if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; break; } if (dig->linkb) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; else args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; break; case 2: case 3: args.v3.ucAction = action; args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) args.v3.ucPanelMode = panel_mode; else args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder); if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode)) args.v3.ucLaneNum = dp_lane_count; else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.ucLaneNum = 8; else args.v3.ucLaneNum = 4; if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000)) args.v1.ucConfig |= 
ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; args.v3.acConfig.ucDigSel = dig->dig_encoder; args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder); break; case 4: args.v4.ucAction = action; args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) args.v4.ucPanelMode = panel_mode; else args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder); if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) args.v4.ucLaneNum = dp_lane_count; else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.ucLaneNum = 8; else args.v4.ucLaneNum = 4; if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) { if (dp_clock == 270000) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; else if (dp_clock == 540000) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; } args.v4.acConfig.ucDigSel = dig->dig_encoder; args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder); if (hpd_id == RADEON_HPD_NONE) args.v4.ucHPD_ID = 0; else args.v4.ucHPD_ID = hpd_id + 1; break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; } break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } union dig_transmitter_control { DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5; }; void atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector; union dig_transmitter_control args; int index = 0; uint8_t frev, crev; bool is_dp = false; int pll_id = 0; int dp_clock = 0; int dp_lane_count = 0; int connector_object_id = 0; int igp_lane_info = 0; int dig_encoder = dig->dig_encoder; int hpd_id = RADEON_HPD_NONE; if (action == ATOM_TRANSMITTER_ACTION_INIT) { connector = radeon_get_connector_for_encoder_init(encoder); /* just needed to avoid bailing in the encoder check. 
the encoder * isn't used for init */ dig_encoder = 0; } else connector = radeon_get_connector_for_encoder(encoder); if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; hpd_id = radeon_connector->hpd.hpd; dp_clock = dig_connector->dp_clock; dp_lane_count = dig_connector->dp_lane_count; connector_object_id = (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; igp_lane_info = dig_connector->igp_lane_info; } if (encoder->crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); pll_id = radeon_crtc->pll_id; } /* no dig encoder assigned */ if (dig_encoder == -1) return; if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder))) is_dp = true; memset(&args, 0, sizeof(args)); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl); break; } if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; switch (frev) { case 1: switch (crev) { case 1: args.v1.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { args.v1.usInitInfo = cpu_to_le16(connector_object_id); } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { args.v1.asMode.ucLaneSel = lane_num; args.v1.asMode.ucLaneSet = lane_set; } else { if (is_dp) args.v1.usPixelClock = cpu_to_le16(dp_clock / 10); else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); } args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; if (dig_encoder) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; else args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; if ((rdev->flags & RADEON_IS_IGP) && (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { if (is_dp || !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) { if (igp_lane_info & 0x1) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; else if (igp_lane_info & 0x2) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; else if (igp_lane_info & 0x4) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; else if (igp_lane_info & 0x8) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; } else { if (igp_lane_info & 0x3) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; else if (igp_lane_info & 0xc) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; } } if (dig->linkb) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; else args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; if (is_dp) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; } break; case 2: args.v2.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { args.v2.usInitInfo = cpu_to_le16(connector_object_id); } else if (action == 
ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { args.v2.asMode.ucLaneSel = lane_num; args.v2.asMode.ucLaneSet = lane_set; } else { if (is_dp) args.v2.usPixelClock = cpu_to_le16(dp_clock / 10); else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); } args.v2.acConfig.ucEncoderSel = dig_encoder; if (dig->linkb) args.v2.acConfig.ucLinkSel = 1; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: args.v2.acConfig.ucTransmitterSel = 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: args.v2.acConfig.ucTransmitterSel = 1; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: args.v2.acConfig.ucTransmitterSel = 2; break; } if (is_dp) { args.v2.acConfig.fCoherentMode = 1; args.v2.acConfig.fDPConnector = 1; } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v2.acConfig.fCoherentMode = 1; if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v2.acConfig.fDualLinkConnector = 1; } break; case 3: args.v3.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { args.v3.usInitInfo = cpu_to_le16(connector_object_id); } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { args.v3.asMode.ucLaneSel = lane_num; args.v3.asMode.ucLaneSet = lane_set; } else { if (is_dp) args.v3.usPixelClock = cpu_to_le16(dp_clock / 10); else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); } if (is_dp) args.v3.ucLaneNum = dp_lane_count; else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.ucLaneNum = 8; else args.v3.ucLaneNum = 4; if (dig->linkb) args.v3.acConfig.ucLinkSel = 1; if (dig_encoder & 1) args.v3.acConfig.ucEncoderSel = 1; /* Select the PLL for the PHY * DP PHY should be clocked from external src if there is * one. 
*/ /* On DCE4, if there is an external clock, it generates the DP ref clock */ if (is_dp && rdev->clock.dp_extclk) args.v3.acConfig.ucRefClkSource = 2; /* external src */ else args.v3.acConfig.ucRefClkSource = pll_id; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: args.v3.acConfig.ucTransmitterSel = 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: args.v3.acConfig.ucTransmitterSel = 1; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: args.v3.acConfig.ucTransmitterSel = 2; break; } if (is_dp) args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v3.acConfig.fCoherentMode = 1; if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.acConfig.fDualLinkConnector = 1; } break; case 4: args.v4.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { args.v4.usInitInfo = cpu_to_le16(connector_object_id); } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { args.v4.asMode.ucLaneSel = lane_num; args.v4.asMode.ucLaneSet = lane_set; } else { if (is_dp) args.v4.usPixelClock = cpu_to_le16(dp_clock / 10); else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); } if (is_dp) args.v4.ucLaneNum = dp_lane_count; else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.ucLaneNum = 8; else args.v4.ucLaneNum = 4; if (dig->linkb) args.v4.acConfig.ucLinkSel = 1; if (dig_encoder & 1) args.v4.acConfig.ucEncoderSel = 1; /* Select the PLL for the PHY * DP PHY should be clocked from external src if there is * one. 
*/ /* On DCE5 DCPLL usually generates the DP ref clock */ if (is_dp) { if (rdev->clock.dp_extclk) args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK; else args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL; } else args.v4.acConfig.ucRefClkSource = pll_id; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: args.v4.acConfig.ucTransmitterSel = 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: args.v4.acConfig.ucTransmitterSel = 1; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: args.v4.acConfig.ucTransmitterSel = 2; break; } if (is_dp) args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v4.acConfig.fCoherentMode = 1; if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.acConfig.fDualLinkConnector = 1; } break; case 5: args.v5.ucAction = action; if (is_dp) args.v5.usSymClock = cpu_to_le16(dp_clock / 10); else args.v5.usSymClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: if (dig->linkb) args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB; else args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD; else args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF; else args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE; break; } if (is_dp) args.v5.ucLaneNum = dp_lane_count; else if (radeon_encoder->pixel_clock > 165000) args.v5.ucLaneNum = 8; else args.v5.ucLaneNum = 4; args.v5.ucConnObjId = connector_object_id; args.v5.ucDigMode = atombios_get_encoder_mode(encoder); if (is_dp && rdev->clock.dp_extclk) args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK; else args.v5.asConfig.ucPhyClkSrcId = pll_id; if (is_dp) args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v5.asConfig.ucCoherentMode = 1; } if (hpd_id == RADEON_HPD_NONE) args.v5.asConfig.ucHPDSel = 0; else args.v5.asConfig.ucHPDSel = hpd_id + 1; args.v5.ucDigEncoderSel = 1 << dig_encoder; args.v5.ucDPLaneSet = lane_set; break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; } break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } bool atombios_set_edp_panel_power(struct drm_connector *connector, int action) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_device *dev = radeon_connector->base.dev; struct radeon_device *rdev = dev->dev_private; union dig_transmitter_control args; int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); uint8_t frev, crev; if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) goto done; if (!ASIC_IS_DCE4(rdev)) goto done; if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) goto done; if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) goto done; memset(&args, 0, sizeof(args)); args.v1.ucAction = action; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); /* wait for the panel to power up */ if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { int i; for (i = 0; i < 300; i++) { if (radeon_hpd_sense(rdev, 
radeon_connector->hpd.hpd)) return true; DRM_MDELAY(1); } return false; } done: return true; } union external_encoder_control { EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3; }; static void atombios_external_encoder_setup(struct drm_encoder *encoder, struct drm_encoder *ext_encoder, int action) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); union external_encoder_control args; struct drm_connector *connector; int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); u8 frev, crev; int dp_clock = 0; int dp_lane_count = 0; int connector_object_id = 0; u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) connector = radeon_get_connector_for_encoder_init(encoder); else connector = radeon_get_connector_for_encoder(encoder); if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; dp_clock = dig_connector->dp_clock; dp_lane_count = dig_connector->dp_lane_count; connector_object_id = (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; } memset(&args, 0, sizeof(args)); if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; switch (frev) { case 1: /* no params on frev 1 */ break; case 2: switch (crev) { case 1: case 2: args.v1.sDigEncoder.ucAction = action; args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) { if (dp_clock == 270000) args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; args.v1.sDigEncoder.ucLaneNum = dp_lane_count; } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.sDigEncoder.ucLaneNum = 8; else args.v1.sDigEncoder.ucLaneNum = 4; break; case 3: args.v3.sExtEncoder.ucAction = action; if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id); else args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) { if (dp_clock == 270000) args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; else if (dp_clock == 540000) args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; args.v3.sExtEncoder.ucLaneNum = dp_lane_count; } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.sExtEncoder.ucLaneNum = 8; else args.v3.sExtEncoder.ucLaneNum = 4; switch (ext_enum) { case GRAPH_OBJECT_ENUM_ID1: args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1; break; case GRAPH_OBJECT_ENUM_ID2: args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2; break; case GRAPH_OBJECT_ENUM_ID3: args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; break; } args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder); break; default: DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); return; } break; default: DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); return; 
} atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_yuv_setup(struct drm_encoder *encoder, bool enable) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ENABLE_YUV_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, EnableYUV); uint32_t temp, reg; memset(&args, 0, sizeof(args)); if (rdev->family >= CHIP_R600) reg = R600_BIOS_3_SCRATCH; else reg = RADEON_BIOS_3_SCRATCH; /* XXX: fix up scratch reg handling */ temp = RREG32(reg); if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) WREG32(reg, (ATOM_S3_TV1_ACTIVE | (radeon_crtc->crtc_id << 18))); else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24))); else WREG32(reg, 0); if (enable) args.ucEnable = ATOM_ENABLE; args.ucCRTC = radeon_crtc->crtc_id; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); WREG32(reg, temp); } static void radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; int index = 0; memset(&args, 0, sizeof(args)); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl); break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); break; case ENCODER_OBJECT_ID_INTERNAL_LVDS: index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); break; case ENCODER_OBJECT_ID_INTERNAL_LVTM1: if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); else index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); else index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); else index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); break; default: return; } switch (mode) { case DRM_MODE_DPMS_ON: args.ucAction = ATOM_ENABLE; /* workaround for DVOOutputControl on some RS690 systems */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); WREG32(RADEON_BIOS_3_SCRATCH, reg); } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 
args.ucAction = ATOM_LCD_BLON; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: args.ucAction = ATOM_DISABLE; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { args.ucAction = ATOM_LCD_BLOFF; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } break; } } static void radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = NULL; struct radeon_connector_atom_dig *radeon_dig_connector = NULL; if (connector) { radeon_connector = to_radeon_connector(connector); radeon_dig_connector = radeon_connector->con_priv; } switch (mode) { case DRM_MODE_DPMS_ON: if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { if (!connector) dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; else dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); /* setup and enable the encoder */ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP_PANEL_MODE, dig->panel_mode); if (ext_encoder) { if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); } atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); } else if (ASIC_IS_DCE4(rdev)) { /* setup and enable the encoder */ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); /* enable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } else { /* setup and enable the encoder and transmitter */ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); /* some early dce3.2 boards have a bug in their transmitter control table */ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); radeon_dig_connector->edp_on = true; } radeon_dp_link_train(encoder, connector); if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); } else if (ASIC_IS_DCE4(rdev)) { /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, 
ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); } else { /* disable the encoder and transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); radeon_dig_connector->edp_on = false; } } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); break; } } static void radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder, struct drm_encoder *ext_encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; switch (mode) { case DRM_MODE_DPMS_ON: default: if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) { atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT); atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF); } else atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) { atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING); atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT); } else atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); break; } } static void radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", radeon_encoder->encoder_id, mode, radeon_encoder->devices, radeon_encoder->active_device); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: radeon_atom_encoder_dpms_avivo(encoder, mode); break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: radeon_atom_encoder_dpms_dig(encoder, mode); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: if (ASIC_IS_DCE5(rdev)) { switch (mode) { case DRM_MODE_DPMS_ON: atombios_dvo_setup(encoder, ATOM_ENABLE); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: atombios_dvo_setup(encoder, ATOM_DISABLE); break; } } else if (ASIC_IS_DCE3(rdev)) radeon_atom_encoder_dpms_dig(encoder, mode); else radeon_atom_encoder_dpms_avivo(encoder, mode); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: if (ASIC_IS_DCE5(rdev)) { switch (mode) { 
case DRM_MODE_DPMS_ON: atombios_dac_setup(encoder, ATOM_ENABLE); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: atombios_dac_setup(encoder, ATOM_DISABLE); break; } } else radeon_atom_encoder_dpms_avivo(encoder, mode); break; default: return; } if (ext_encoder) radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode); radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); } union crtc_source_param { SELECT_CRTC_SOURCE_PS_ALLOCATION v1; SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; }; static void atombios_set_encoder_crtc_source(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); union crtc_source_param args; int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); uint8_t frev, crev; struct radeon_encoder_atom_dig *dig; memset(&args, 0, sizeof(args)); if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; switch (frev) { case 1: switch (crev) { case 1: default: if (ASIC_IS_AVIVO(rdev)) args.v1.ucCRTC = radeon_crtc->crtc_id; else { if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) { args.v1.ucCRTC = radeon_crtc->crtc_id; } else { args.v1.ucCRTC = radeon_crtc->crtc_id << 2; } } switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX; break; case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX; else args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX; break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX; break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; else args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX; break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; else args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX; break; } break; case 2: args.v2.ucCRTC = radeon_crtc->crtc_id; if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; else args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); } else args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: dig = radeon_encoder->enc_priv; switch (dig->dig_encoder) { case 0: args.v2.ucEncoderID = 
ASIC_INT_DIG1_ENCODER_ID; break; case 1: args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; break; case 2: args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; break; case 3: args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; break; case 4: args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; break; case 5: args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; break; } break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; else args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID; break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; else args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID; break; } break; } break; default: DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); return; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); /* update scratch regs with new routing */ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static void atombios_apply_encoder_quirks(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); /* Funky macbooks */ if ((dev->pci_device == 0x71C5) && (dev->pci_subvendor == 0x106b) && (dev->pci_subdevice == 0x0080)) { if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL); lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN; lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN; WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control); } } /* set scaler clears this on some chips */ if (ASIC_IS_AVIVO(rdev) && (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { if (ASIC_IS_DCE4(rdev)) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, EVERGREEN_INTERLEAVE_EN); else WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); } else { if (mode->flags & DRM_MODE_FLAG_INTERLACE) WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN); else WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); } } } static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *test_encoder; struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t dig_enc_in_use = 0; if (ASIC_IS_DCE6(rdev)) { /* DCE6 */ switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: if (dig->linkb) return 1; else return 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return 3; else return 2; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return 5; else return 4; break; } } else if (ASIC_IS_DCE4(rdev)) { 
/* DCE4/5 */ if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { /* ontario follows DCE4 */ if (rdev->family == CHIP_PALM) { if (dig->linkb) return 1; else return 0; } else /* llano follows DCE3.2 */ return radeon_crtc->crtc_id; } else { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: if (dig->linkb) return 1; else return 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return 3; else return 2; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return 5; else return 4; break; } } } /* on DCE3.2 an encoder can drive any block, so just use the crtc id */ if (ASIC_IS_DCE32(rdev)) { return radeon_crtc->crtc_id; } /* on DCE3 - LVTMA can only be driven by DIGB */ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { struct radeon_encoder *radeon_test_encoder; if (encoder == test_encoder) continue; if (!radeon_encoder_is_digital(test_encoder)) continue; radeon_test_encoder = to_radeon_encoder(test_encoder); dig = radeon_test_encoder->enc_priv; if (dig->dig_encoder >= 0) dig_enc_in_use |= (1 << dig->dig_encoder); } if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) { if (dig_enc_in_use & 0x2) DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n"); return 1; } if (!(dig_enc_in_use & 1)) return 0; return 1; }
/* This only needs to be called once at startup */ void radeon_atom_encoder_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_encoder *encoder; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); break; default: break; } if (ext_encoder && (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))) atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); } }
static void radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); radeon_encoder->pixel_clock = adjusted_mode->clock; /* need to call this here rather than in prepare() since we need some crtc info */ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) atombios_yuv_setup(encoder, true); else atombios_yuv_setup(encoder, false); } switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: atombios_dvo_setup(encoder, ATOM_ENABLE); break; case
ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: atombios_dac_setup(encoder, ATOM_ENABLE); if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) atombios_tv_setup(encoder, ATOM_ENABLE); else atombios_tv_setup(encoder, ATOM_DISABLE); } break; } atombios_apply_encoder_quirks(encoder, adjusted_mode); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { r600_hdmi_enable(encoder); if (ASIC_IS_DCE6(rdev)) ; /* TODO (use pointers instead of if-s?) */ else if (ASIC_IS_DCE4(rdev)) evergreen_hdmi_setmode(encoder, adjusted_mode); else r600_hdmi_setmode(encoder, adjusted_mode); } } static bool atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_CRT_SUPPORT)) { DAC_LOAD_DETECTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection); uint8_t frev, crev; memset(&args, 0, sizeof(args)); if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return false; args.sDacload.ucMisc = 0; if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) || (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1)) args.sDacload.ucDacType = ATOM_DAC_A; else args.sDacload.ucDacType = ATOM_DAC_B; if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT); else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT); else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT); if (crev >= 3) args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT); if (crev >= 3) args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); return true; } else return false; } static enum drm_connector_status radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); uint32_t bios_0_scratch; if (!atombios_dac_load_detect(encoder, connector)) { DRM_DEBUG_KMS("detect returned false \n"); return connector_status_unknown; } if (rdev->family >= CHIP_R600) bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); else bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT1_MASK) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT2_MASK) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { 
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) return connector_status_connected; /* CTV */ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) return connector_status_connected; /* STV */ } return connector_status_disconnected; } static enum drm_connector_status radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); u32 bios_0_scratch; if (!ASIC_IS_DCE4(rdev)) return connector_status_unknown; if (!ext_encoder) return connector_status_unknown; if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0) return connector_status_unknown; /* load detect on the dp bridge */ atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION); bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT1_MASK) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT2_MASK) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) return connector_status_connected; /* CTV */ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) return connector_status_connected; /* STV */ } return connector_status_disconnected; } void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder) { struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); if (ext_encoder) /* ddc_setup on the dp bridge */ atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); } static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; if (dig) { dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) { if (rdev->family >= CHIP_R600) dig->afmt = rdev->mode_info.afmt[dig->dig_encoder]; else /* RS600/690/740 have only 1 afmt block */ dig->afmt = rdev->mode_info.afmt[0]; } } } radeon_atom_output_lock(encoder, true); if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); /* select the clock/data port if it uses a router */ if (radeon_connector->router.cd_valid) radeon_router_select_cd_port(radeon_connector); /* turn eDP panel on for mode set */ if (connector->connector_type == 
DRM_MODE_CONNECTOR_eDP) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); } /* this is needed for the pll/ss setup to work correctly in some cases */ atombios_set_encoder_crtc_source(encoder); }
static void radeon_atom_encoder_commit(struct drm_encoder *encoder) { /* need to call this here as we need the crtc set up */ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); radeon_atom_output_lock(encoder, false); }
static void radeon_atom_encoder_disable(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig; /* check for pre-DCE3 cards with shared encoders; * can't really use the links individually, so don't disable * the encoder if it's in use by another connector */ if (!ASIC_IS_DCE3(rdev)) { struct drm_encoder *other_encoder; struct radeon_encoder *other_radeon_encoder; list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { other_radeon_encoder = to_radeon_encoder(other_encoder); if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && drm_helper_encoder_in_use(other_encoder)) goto disable_done; } } radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: atombios_dvo_setup(encoder, ATOM_DISABLE); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: atombios_dac_setup(encoder, ATOM_DISABLE); if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) atombios_tv_setup(encoder, ATOM_DISABLE); break; } disable_done: if (radeon_encoder_is_digital(encoder)) { if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) r600_hdmi_disable(encoder); dig = radeon_encoder->enc_priv; dig->dig_encoder = -1; } radeon_encoder->active_device = 0; }
/* these are handled by the primary encoders */ static void radeon_atom_ext_prepare(struct drm_encoder *encoder) { } static void radeon_atom_ext_commit(struct drm_encoder *encoder) { } static void radeon_atom_ext_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { } static void radeon_atom_ext_disable(struct drm_encoder *encoder) { } static void radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) { }
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
+ struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) { return true; }
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { .dpms = radeon_atom_ext_dpms, .mode_fixup = radeon_atom_ext_mode_fixup, .prepare = radeon_atom_ext_prepare, .mode_set = radeon_atom_ext_mode_set, .commit = radeon_atom_ext_commit, .disable = radeon_atom_ext_disable, /* no detect for TMDS/LVDS yet */ }; static const struct
drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { .dpms = radeon_atom_encoder_dpms, .mode_fixup = radeon_atom_mode_fixup, .prepare = radeon_atom_encoder_prepare, .mode_set = radeon_atom_encoder_mode_set, .commit = radeon_atom_encoder_commit, .disable = radeon_atom_encoder_disable, .detect = radeon_atom_dig_detect, }; static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { .dpms = radeon_atom_encoder_dpms, .mode_fixup = radeon_atom_mode_fixup, .prepare = radeon_atom_encoder_prepare, .mode_set = radeon_atom_encoder_mode_set, .commit = radeon_atom_encoder_commit, .detect = radeon_atom_dac_detect, }; void radeon_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) radeon_atom_backlight_exit(radeon_encoder); free(radeon_encoder->enc_priv, DRM_MEM_DRIVER); drm_encoder_cleanup(encoder); free(radeon_encoder, DRM_MEM_DRIVER); } static const struct drm_encoder_funcs radeon_atom_enc_funcs = { .destroy = radeon_enc_destroy, }; static struct radeon_encoder_atom_dac * radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_atom_dac *dac = malloc(sizeof(struct radeon_encoder_atom_dac), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (!dac) return NULL; dac->tv_std = radeon_atombios_get_tv_info(rdev); return dac; } static struct radeon_encoder_atom_dig * radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) { int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; struct radeon_encoder_atom_dig *dig = malloc(sizeof(struct radeon_encoder_atom_dig), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (!dig) return NULL; /* coherent mode by default */ dig->coherent_mode = true; dig->dig_encoder = -1; if (encoder_enum == 2) dig->linkb = true; else dig->linkb = false; return dig; } void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device, u16 caps) { struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; /* see if we already added it */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->encoder_enum == encoder_enum) { radeon_encoder->devices |= supported_device; return; } } /* add a new one */ radeon_encoder = malloc(sizeof(struct radeon_encoder), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (!radeon_encoder) return; encoder = &radeon_encoder->base; switch (rdev->num_crtc) { case 1: encoder->possible_crtcs = 0x1; break; case 2: default: encoder->possible_crtcs = 0x3; break; case 4: encoder->possible_crtcs = 0xf; break; case 6: encoder->possible_crtcs = 0x3f; break; } radeon_encoder->enc_priv = NULL; radeon_encoder->encoder_enum = encoder_enum; radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; radeon_encoder->underscan_type = UNDERSCAN_OFF; radeon_encoder->is_ext_encoder = false; radeon_encoder->caps = caps; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, 
&radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); } else { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC); radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } else { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; case ENCODER_OBJECT_ID_SI170B: case ENCODER_OBJECT_ID_CH7303: case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: case ENCODER_OBJECT_ID_TITFP513: case ENCODER_OBJECT_ID_VT1623: case ENCODER_OBJECT_ID_HDMI_SI1930: case ENCODER_OBJECT_ID_TRAVIS: case ENCODER_OBJECT_ID_NUTMEG: /* these are handled by the primary encoders */ radeon_encoder->is_ext_encoder = true; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); else drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); break; } }
Index: head/sys/dev/drm2/radeon/radeon_legacy_encoders.c
===================================================================
--- head/sys/dev/drm2/radeon/radeon_legacy_encoders.c (revision 277486)
+++ head/sys/dev/drm2/radeon/radeon_legacy_encoders.c (revision 277487)
@@ -1,1817 +1,1817 @@
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc.
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "radeon.h" #include "radeon_asic.h" #include "atom.h" static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder_helper_funcs *encoder_funcs; encoder_funcs = encoder->helper_private; encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); radeon_encoder->active_device = 0; } static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; int panel_pwr_delay = 2000; bool is_mac = false; uint8_t backlight_level; DRM_DEBUG_KMS("\n"); lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; panel_pwr_delay = lvds->panel_pwr_delay; if (lvds->bl_dev) backlight_level = lvds->backlight_level; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; panel_pwr_delay = lvds->panel_pwr_delay; if (lvds->bl_dev) backlight_level = lvds->backlight_level; } } /* macs (and possibly some x86 oem systems?) wire up LVDS strangely * Taken from radeonfb. 
*/ if ((rdev->mode_info.connector_table == CT_IBOOK) || (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) || (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) || (rdev->mode_info.connector_table == CT_POWERBOOK_VGA)) is_mac = true; switch (mode) { case DRM_MODE_DPMS_ON: disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN); disp_pwr_man |= RADEON_AUTO_PWRUP_EN; WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl |= RADEON_LVDS_PLL_EN; WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); DRM_MDELAY(1); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS | RADEON_LVDS_BL_MOD_LEVEL_MASK); lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON | (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT)); if (is_mac) lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; DRM_MDELAY(panel_pwr_delay); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; if (is_mac) { lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN; WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN); } else { WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); } DRM_MDELAY(panel_pwr_delay); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); DRM_MDELAY(panel_pwr_delay); break; } if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); DRM_DEBUG("\n"); if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; lvds->dpms_mode = mode; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; lvds->dpms_mode = mode; } } radeon_legacy_lvds_update(encoder, mode); } static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl; DRM_DEBUG_KMS("\n"); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); if (rdev->is_atom_bios) { /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl * need to call that on resume to set up the reg properly. 
*/ radeon_encoder->pixel_clock = adjusted_mode->clock; atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); } else { struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; if (lvds) { DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); lvds_gen_cntl = lvds->lvds_gen_cntl; lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); } else lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); } lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_RST_FM); if (ASIC_IS_R300(rdev)) lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK); if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { if (radeon_encoder->rmx_type != RMX_OFF) lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; } else lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; } else { if (ASIC_IS_R300(rdev)) lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2; else lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2; } WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl); if (rdev->family == CHIP_RV410) WREG32(RADEON_CLOCK_CNTL_INDEX, 0); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); }
static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
+ struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); /* set the active encoder to connector routing */ radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); /* get the native mode for LVDS */ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) radeon_panel_mode_fixup(encoder, adjusted_mode); return true; }
static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { .dpms = radeon_legacy_lvds_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_lvds_prepare, .mode_set = radeon_legacy_lvds_mode_set, .commit = radeon_legacy_lvds_commit, .disable = radeon_legacy_encoder_disable, };
u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; u8 backlight_level; backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; return backlight_level; }
void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; int dpms_mode = DRM_MODE_DPMS_ON; if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; if (lvds->backlight_level > 0) dpms_mode = lvds->dpms_mode; else dpms_mode = DRM_MODE_DPMS_OFF; lvds->backlight_level = level; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; if (lvds->backlight_level > 0) dpms_mode = lvds->dpms_mode; else dpms_mode = DRM_MODE_DPMS_OFF; lvds->backlight_level = level; } } radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); }
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); uint8_t level; /* Convert brightness to hardware level */ if (bd->props.brightness < 0) level = 0; else if (bd->props.brightness > RADEON_MAX_BL_LEVEL) level = RADEON_MAX_BL_LEVEL; else level = bd->props.brightness; if (pdata->negative) level = RADEON_MAX_BL_LEVEL - level; return level; } static int radeon_legacy_backlight_update_status(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); struct radeon_encoder *radeon_encoder = pdata->encoder; radeon_legacy_set_backlight_level(radeon_encoder, radeon_legacy_lvds_level(bd)); return 0; } static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); struct radeon_encoder *radeon_encoder = pdata->encoder; struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint8_t backlight_level; backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level; } static const struct backlight_ops radeon_backlight_ops = { .get_brightness = radeon_legacy_backlight_get_brightness, .update_status = radeon_legacy_backlight_update_status, }; void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct backlight_device *bd; struct backlight_properties props; struct radeon_backlight_privdata *pdata; uint8_t backlight_level; char bl_name[16]; if (!radeon_encoder->enc_priv) return; #ifdef CONFIG_PMAC_BACKLIGHT if (!pmac_has_backlight_type("ati") && !pmac_has_backlight_type("mnca")) return; #endif pdata = malloc(sizeof(struct radeon_backlight_privdata), DRM_MEM_DRIVER, M_WAITOK); if (!pdata) { DRM_ERROR("Memory allocation failed\n"); goto error; } memset(&props, 0, sizeof(props)); props.max_brightness = RADEON_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "radeon_bl%d", dev->primary->index); bd = backlight_device_register(bl_name, &drm_connector->kdev, pdata, &radeon_backlight_ops, &props); if (IS_ERR(bd)) { DRM_ERROR("Backlight registration failed\n"); goto error; } pdata->encoder = radeon_encoder; backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; /* First, try to detect backlight level sense based on the assumption * that firmware set it up at full brightness */ if (backlight_level == 0) pdata->negative = true; else if (backlight_level == 0xff) pdata->negative = false; else { /* XXX hack... maybe some day we can figure out in what direction * backlight should work on a given panel? 
*/ pdata->negative = (rdev->family != CHIP_RV200 && rdev->family != CHIP_RV250 && rdev->family != CHIP_RV280 && rdev->family != CHIP_RV350); #ifdef CONFIG_PMAC_BACKLIGHT pdata->negative = (pdata->negative || of_machine_is_compatible("PowerBook4,3") || of_machine_is_compatible("PowerBook6,3") || of_machine_is_compatible("PowerBook6,5")); #endif } if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; lvds->bl_dev = bd; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; lvds->bl_dev = bd; } bd->props.brightness = radeon_legacy_backlight_get_brightness(bd); bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); DRM_INFO("radeon legacy LVDS backlight initialized\n"); return; error: free(pdata, DRM_MEM_DRIVER); return; } static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct backlight_device *bd = NULL; if (!radeon_encoder->enc_priv) return; if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; bd = lvds->bl_dev; lvds->bl_dev = NULL; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; bd = lvds->bl_dev; lvds->bl_dev = NULL; } if (bd) { struct radeon_backlight_privdata *pdata; pdata = bl_get_data(bd); backlight_device_unregister(bd); free(pdata, DRM_MEM_DRIVER); DRM_INFO("radeon legacy LVDS backlight unloaded\n"); } } #else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */ void radeon_legacy_backlight_init(struct radeon_encoder *encoder, struct drm_connector *drm_connector) { } static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder) { } #endif static void radeon_lvds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->enc_priv) { radeon_legacy_backlight_exit(radeon_encoder); free(radeon_encoder->enc_priv, DRM_MEM_DRIVER); } drm_encoder_cleanup(encoder); free(radeon_encoder, DRM_MEM_DRIVER); } static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { .destroy = radeon_lvds_enc_destroy, }; static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL); uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: crtc_ext_cntl |= RADEON_CRTC_CRT_ON; dac_cntl &= ~RADEON_DAC_PDWN; dac_macro_cntl &= ~(RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON; dac_cntl |= RADEON_DAC_PDWN; dac_macro_cntl |= (RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); break; } /* handled in radeon_crtc_dpms() */ if (!(rdev->flags & RADEON_SINGLE_CRTC)) WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); WREG32(RADEON_DAC_CNTL, dac_cntl); WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl; DRM_DEBUG_KMS("\n"); if (radeon_crtc->crtc_id == 0) { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & ~(RADEON_DISP_DAC_SOURCE_MASK); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL); WREG32(RADEON_DAC_CNTL2, dac2_cntl); } } else { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & ~(RADEON_DISP_DAC_SOURCE_MASK); disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL; WREG32(RADEON_DAC_CNTL2, dac2_cntl); } } dac_cntl = (RADEON_DAC_MASK_ALL | RADEON_DAC_VGA_ADR_EN | /* TODO 6-bits */ RADEON_DAC_8BIT_EN); WREG32_P(RADEON_DAC_CNTL, dac_cntl, RADEON_DAC_RANGE_CNTL | RADEON_DAC_BLANKING); if (radeon_encoder->enc_priv) { struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv; dac_macro_cntl = p_dac->ps2_pdac_adj; } else dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B; WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t vclk_ecp_cntl, crtc_ext_cntl; uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp; enum drm_connector_status found = connector_status_disconnected; bool color = true; /* just don't bother on RN50 those chip are often connected to remoting * console hw and often we get failure to load detect those. So to make * everyone happy report the encoder as always connected. 
*/ if (ASIC_IS_RN50(rdev)) { return connector_status_connected; } /* save the regs we need */ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); dac_cntl = RREG32(RADEON_DAC_CNTL); dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); tmp = vclk_ecp_cntl & ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp); tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON; WREG32(RADEON_CRTC_EXT_CNTL, tmp); tmp = RADEON_DAC_FORCE_BLANK_OFF_EN | RADEON_DAC_FORCE_DATA_EN; if (color) tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB; else tmp |= RADEON_DAC_FORCE_DATA_SEL_G; if (ASIC_IS_R300(rdev)) tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); else if (ASIC_IS_RV100(rdev)) tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT); else tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); WREG32(RADEON_DAC_EXT_CNTL, tmp); tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN); tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; WREG32(RADEON_DAC_CNTL, tmp); tmp = dac_macro_cntl; tmp &= ~(RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); WREG32(RADEON_DAC_MACRO_CNTL, tmp); DRM_MDELAY(2); if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT) found = connector_status_connected; /* restore the regs we used */ WREG32(RADEON_DAC_CNTL, dac_cntl); WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl); return found; } static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { .dpms = radeon_legacy_primary_dac_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_primary_dac_prepare, .mode_set = radeon_legacy_primary_dac_mode_set, .commit = radeon_legacy_primary_dac_commit, .detect = radeon_legacy_primary_dac_detect, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = { .destroy = radeon_enc_destroy, }; static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); break; } WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); } static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl; int i; DRM_DEBUG_KMS("\n"); tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); tmp &= 0xfffff; if (rdev->family == CHIP_RV280) { /* bit 22 of TMDS_PLL_CNTL is read-back inverted */ tmp ^= (1 << 22); tmds_pll_cntl ^= (1 << 22); } if (radeon_encoder->enc_priv) { struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv; for (i = 0; i < 4; i++) { if (tmds->tmds_pll[i].freq == 0) break; if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) { tmp = tmds->tmds_pll[i].value ; break; } } } if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) { if (tmp & 0xfff00000) tmds_pll_cntl = tmp; else { tmds_pll_cntl &= 0xfff00000; tmds_pll_cntl |= tmp; } } else tmds_pll_cntl = tmp; tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) & ~(RADEON_TMDS_TRANSMITTER_PLLRST); if (rdev->family == CHIP_R200 || rdev->family == CHIP_R100 || ASIC_IS_R300(rdev)) tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN); else /* RV chips got this bit reversed */ tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN; fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) | (RADEON_FP_CRTC_DONT_SHADOW_VPAR | RADEON_FP_CRTC_DONT_SHADOW_HEND)); fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN | RADEON_FP_DFP_SYNC_SEL | RADEON_FP_CRT_SYNC_SEL | RADEON_FP_CRTC_LOCK_8DOT | RADEON_FP_USE_SHADOW_EN | RADEON_FP_CRTC_USE_SHADOW_VEND | RADEON_FP_CRT_SYNC_ALT); if (1) /* FIXME rgbBits == 8 */ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ else fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */ if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; if (radeon_encoder->rmx_type != RMX_OFF) fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; else fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; } else fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2; } else { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2; } else fp_gen_cntl |= RADEON_FP_SEL_CRTC2; } WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl); WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl); WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { .dpms = 
radeon_legacy_tmds_int_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tmds_int_prepare, .mode_set = radeon_legacy_tmds_int_mode_set, .commit = radeon_legacy_tmds_int_commit, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = { .destroy = radeon_enc_destroy, }; static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN; fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: fp2_gen_cntl |= RADEON_FP2_BLANK_EN; fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); break; } WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); } static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t fp2_gen_cntl; DRM_DEBUG_KMS("\n"); if (rdev->is_atom_bios) { radeon_encoder->pixel_clock = adjusted_mode->clock; atombios_dvo_setup(encoder, ATOM_ENABLE); fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); } else { fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); if (1) /* FIXME rgbBits == 8 */ fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */ else fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN | RADEON_FP2_DVO_RATE_SEL_SDR); /* XXX: these are oem specific */ if (ASIC_IS_R300(rdev)) { if ((dev->pci_device == 0x4850) && (dev->pci_subvendor == 0x1028) && (dev->pci_subdevice == 0x2001)) /* Dell Inspiron 8600 */ fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE; else fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE; /*if (mode->clock > 165000) fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ } if (!radeon_combios_external_tmds_setup(encoder)) radeon_external_tmds_setup(encoder); } if (radeon_crtc->crtc_id == 0) { if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; if (radeon_encoder->rmx_type != RMX_OFF) fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; else fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; } else fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2; } else { if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; 
fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; } else fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2; } WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */ free(radeon_encoder->enc_priv, DRM_MEM_DRIVER); drm_encoder_cleanup(encoder); free(radeon_encoder, DRM_MEM_DRIVER); } static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { .dpms = radeon_legacy_tmds_ext_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tmds_ext_prepare, .mode_set = radeon_legacy_tmds_ext_mode_set, .commit = radeon_legacy_tmds_ext_commit, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { .destroy = radeon_ext_tmds_enc_destroy, }; static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0; uint32_t tv_master_cntl = 0; bool is_tv; DRM_DEBUG_KMS("\n"); is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false; if (rdev->family == CHIP_R200) fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); else { if (is_tv) tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); else crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); } switch (mode) { case DRM_MODE_DPMS_ON: if (rdev->family == CHIP_R200) { fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); } else { if (is_tv) tv_master_cntl |= RADEON_TV_ON; else crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON; if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) tv_dac_cntl &= ~(R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); else tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); } break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (rdev->family == CHIP_R200) fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); else { if (is_tv) tv_master_cntl &= ~RADEON_TV_ON; else crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) tv_dac_cntl |= (R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); else tv_dac_cntl |= (RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); } break; } if (rdev->family == CHIP_R200) { WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); } else { if (is_tv) WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); /* handled in radeon_crtc_dpms() */ else if (!(rdev->flags & RADEON_SINGLE_CRTC)) WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); } if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); } static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0; uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0; bool is_tv = false; DRM_DEBUG_KMS("\n"); is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false; if (rdev->family != CHIP_R200) { tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | RADEON_TV_DAC_BGADJ_MASK | R420_TV_DAC_DACADJ_MASK | R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | R420_TV_DAC_TVENABLE); } else { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | RADEON_TV_DAC_BGADJ_MASK | RADEON_TV_DAC_DACADJ_MASK | RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD); } tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD; if (is_tv) { if (tv_dac->tv_std == TV_STD_NTSC || tv_dac->tv_std == TV_STD_NTSC_J || tv_dac->tv_std == TV_STD_PAL_M || tv_dac->tv_std == TV_STD_PAL_60) tv_dac_cntl |= tv_dac->ntsc_tvdac_adj; else tv_dac_cntl |= tv_dac->pal_tvdac_adj; if (tv_dac->tv_std == TV_STD_NTSC || tv_dac->tv_std == TV_STD_NTSC_J) tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC; else tv_dac_cntl |= RADEON_TV_DAC_STD_PAL; } else tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 | tv_dac->ps2_tvdac_adj); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); } if (ASIC_IS_R300(rdev)) { gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); } else if (rdev->family != CHIP_R200) disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); else if (rdev->family == CHIP_R200) fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); if (rdev->family >= CHIP_R200) disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); if (is_tv) { uint32_t dac_cntl; dac_cntl = RREG32(RADEON_DAC_CNTL); dac_cntl &= ~RADEON_DAC_TVO_EN; WREG32(RADEON_DAC_CNTL, dac_cntl); if (ASIC_IS_R300(rdev)) gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1; dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL; if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC | RADEON_DISP_TV_SOURCE_CRTC); } if (rdev->family >= CHIP_R200) { disp_tv_out_cntl &= ~RADEON_DISP_TV_PATH_SRC_CRTC2; } else { disp_hw_debug |= RADEON_CRT2_DISP1_SEL; } } else { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC; } if (rdev->family >= CHIP_R200) { disp_tv_out_cntl |= 
RADEON_DISP_TV_PATH_SRC_CRTC2; } else { disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; } } WREG32(RADEON_DAC_CNTL2, dac2_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL; if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC; } else if (rdev->family == CHIP_R200) { fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | RADEON_FP2_DVO_RATE_SEL_SDR); } else disp_hw_debug |= RADEON_CRT2_DISP1_SEL; } else { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2; } else if (rdev->family == CHIP_R200) { fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | RADEON_FP2_DVO_RATE_SEL_SDR); fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; } else disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; } WREG32(RADEON_DAC_CNTL2, dac2_cntl); } if (ASIC_IS_R300(rdev)) { WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else if (rdev->family != CHIP_R200) WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); else if (rdev->family == CHIP_R200) WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->family >= CHIP_R200) WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); if (is_tv) radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static bool r300_legacy_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl; uint32_t disp_output_cntl, gpiopad_a, tmp; bool found = false; /* save regs needed */ gpiopad_a = RREG32(RADEON_GPIOPAD_A); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); WREG32_P(RADEON_GPIOPAD_A, 0, ~1); WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL); WREG32(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT); tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, tmp); WREG32(RADEON_DAC_EXT_CNTL, RADEON_DAC2_FORCE_BLANK_OFF_EN | RADEON_DAC2_FORCE_DATA_EN | RADEON_DAC_FORCE_DATA_SEL_RGB | (0xec << RADEON_DAC_FORCE_DATA_SHIFT)); WREG32(RADEON_TV_DAC_CNTL, RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT) | (6 << RADEON_TV_DAC_DACADJ_SHIFT)); RREG32(RADEON_TV_DAC_CNTL); DRM_MDELAY(4); WREG32(RADEON_TV_DAC_CNTL, RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT) | (6 << RADEON_TV_DAC_DACADJ_SHIFT)); RREG32(RADEON_TV_DAC_CNTL); DRM_MDELAY(6); tmp = RREG32(RADEON_TV_DAC_CNTL); if ((tmp & RADEON_TV_DAC_GDACDET) != 0) { found = true; DRM_DEBUG_KMS("S-video TV connection detected\n"); } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) { found = true; DRM_DEBUG_KMS("Composite TV connection detected\n"); } WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); WREG32(RADEON_DAC_CNTL2, dac_cntl2); WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); return found; } static bool 
radeon_legacy_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t tv_dac_cntl, dac_cntl2; uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp; bool found = false; if (ASIC_IS_R300(rdev)) return r300_legacy_tv_detect(encoder, connector); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); config_cntl = RREG32(RADEON_CONFIG_CNTL); tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL); tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL; WREG32(RADEON_DAC_CNTL2, tmp); tmp = tv_master_cntl | RADEON_TV_ON; tmp &= ~(RADEON_TV_ASYNC_RST | RADEON_RESTART_PHASE_FIX | RADEON_CRT_FIFO_CE_EN | RADEON_TV_FIFO_CE_EN | RADEON_RE_SYNC_NOW_SEL_MASK); tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST; WREG32(RADEON_TV_MASTER_CNTL, tmp); tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT); if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK) tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT); else tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT); WREG32(RADEON_TV_DAC_CNTL, tmp); tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN | RADEON_RED_MX_FORCE_DAC_DATA | RADEON_GRN_MX_FORCE_DAC_DATA | RADEON_BLU_MX_FORCE_DAC_DATA | (0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT); WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp); DRM_MDELAY(3); tmp = RREG32(RADEON_TV_DAC_CNTL); if (tmp & RADEON_TV_DAC_GDACDET) { found = true; DRM_DEBUG_KMS("S-video TV connection detected\n"); } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) { found = true; DRM_DEBUG_KMS("Composite TV connection detected\n"); } WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); WREG32(RADEON_DAC_CNTL2, dac_cntl2); return found; } static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl; uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c; uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f; uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp; uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid; bool found = false; int i; /* save the regs we need */ gpio_monid = RREG32(RADEON_GPIO_MONID); fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A); disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B); disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C); disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D); disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E); disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F); crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP); crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP); crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID); crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID); tmp = RREG32(RADEON_GPIO_MONID); tmp &= ~RADEON_GPIO_A_0; WREG32(RADEON_GPIO_MONID, tmp); WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON | RADEON_FP2_PANEL_FORMAT | R200_FP2_SOURCE_SEL_TRANS_UNIT | RADEON_FP2_DVO_EN | R200_FP2_DVO_RATE_SEL_SDR)); 
WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX | RADEON_DISP_TRANS_MATRIX_GRAPHICS)); WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN | RADEON_CRTC2_DISP_REQ_EN_B)); WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000); WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0); WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000); WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0); WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000); WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0); WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008); WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800); WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001); WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080); for (i = 0; i < 200; i++) { tmp = RREG32(RADEON_GPIO_MONID); if (tmp & RADEON_GPIO_Y_0) found = true; if (found) break; DRM_MDELAY(1); if (!drm_can_sleep()) DRM_MDELAY(1); else DRM_MSLEEP(1); } /* restore the regs we used */ WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a); WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b); WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c); WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d); WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e); WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f); WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp); WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp); WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid); WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid); WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); WREG32(RADEON_GPIO_MONID, gpio_monid); return found; } static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl; uint32_t gpiopad_a = 0, pixclks_cntl, tmp; uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0; enum drm_connector_status found = connector_status_disconnected; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; bool color = true; struct drm_crtc *crtc; /* find out if crtc2 is in use or if this encoder is using it */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); if ((radeon_crtc->crtc_id == 1) && crtc->enabled) { if (encoder->crtc != crtc) { return connector_status_disconnected; } } } if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || connector->connector_type == DRM_MODE_CONNECTOR_Composite || connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) { bool tv_detect; if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT)) return connector_status_disconnected; tv_detect = radeon_legacy_tv_detect(encoder, connector); if (tv_detect && tv_dac) found = connector_status_connected; return found; } /* don't probe if the encoder is being used for something else not CRT related */ if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) { DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device); return connector_status_disconnected; } /* R200 uses an external DAC for secondary DAC */ if (rdev->family == CHIP_R200) { if (radeon_legacy_ext_dac_detect(encoder, connector)) found = 
connector_status_connected; return found; } /* save the regs we need */ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); if (rdev->flags & RADEON_SINGLE_CRTC) { crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); } else { if (ASIC_IS_R300(rdev)) { gpiopad_a = RREG32(RADEON_GPIOPAD_A); disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); } else { disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); } crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); } tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb | RADEON_PIX2CLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); if (rdev->flags & RADEON_SINGLE_CRTC) { tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON; WREG32(RADEON_CRTC_EXT_CNTL, tmp); } else { tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK; tmp |= RADEON_CRTC2_CRT2_ON | (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT); WREG32(RADEON_CRTC2_GEN_CNTL, tmp); if (ASIC_IS_R300(rdev)) { WREG32_P(RADEON_GPIOPAD_A, 1, ~1); tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, tmp); } else { tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL; WREG32(RADEON_DISP_HW_DEBUG, tmp); } } tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_PS2; WREG32(RADEON_TV_DAC_CNTL, tmp); tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN | RADEON_DAC2_FORCE_DATA_EN; if (color) tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB; else tmp |= RADEON_DAC_FORCE_DATA_SEL_G; if (ASIC_IS_R300(rdev)) tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); else tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); WREG32(RADEON_DAC_EXT_CNTL, tmp); tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN; WREG32(RADEON_DAC_CNTL2, tmp); DRM_MDELAY(10); if (ASIC_IS_R300(rdev)) { if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B) found = connector_status_connected; } else { if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT) found = connector_status_connected; } /* restore regs we used */ WREG32(RADEON_DAC_CNTL2, dac_cntl2); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); if (rdev->flags & RADEON_SINGLE_CRTC) { WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); } else { WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); if (ASIC_IS_R300(rdev)) { WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); } else { WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); } } WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); return found; } static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { .dpms = radeon_legacy_tv_dac_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tv_dac_prepare, .mode_set = radeon_legacy_tv_dac_mode_set, .commit = radeon_legacy_tv_dac_commit, .detect = radeon_legacy_tv_dac_detect, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = { .destroy = radeon_enc_destroy, }; static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_int_tmds *tmds = NULL; bool ret; tmds = malloc(sizeof(struct radeon_encoder_int_tmds), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (!tmds) return NULL; if (rdev->is_atom_bios) ret = radeon_atombios_get_tmds_info(encoder, tmds); else ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); if (ret == 
false) radeon_legacy_get_tmds_info_from_table(encoder, tmds); return tmds; } static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_ext_tmds *tmds = NULL; bool ret; if (rdev->is_atom_bios) return NULL; tmds = malloc(sizeof(struct radeon_encoder_ext_tmds), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (!tmds) return NULL; ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds); if (ret == false) radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds); return tmds; } void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) { struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; /* see if we already added it */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->encoder_enum == encoder_enum) { radeon_encoder->devices |= supported_device; return; } } /* add a new one */ radeon_encoder = malloc(sizeof(struct radeon_encoder), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (!radeon_encoder) return; encoder = &radeon_encoder->base; if (rdev->flags & RADEON_SINGLE_CRTC) encoder->possible_crtcs = 0x1; else encoder->possible_crtcs = 0x3; radeon_encoder->enc_priv = NULL; radeon_encoder->encoder_enum = encoder_enum; radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder); radeon_encoder->rmx_type = RMX_FULL; break; case ENCODER_OBJECT_ID_INTERNAL_TMDS1: drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC); drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); if (!rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder); break; } } Index: head/sys/modules/drm2/i915kms/Makefile 
===================================================================
--- head/sys/modules/drm2/i915kms/Makefile	(revision 277486)
+++ head/sys/modules/drm2/i915kms/Makefile	(revision 277487)
@@ -1,55 +1,58 @@
# $FreeBSD$
.PATH: ${.CURDIR}/../../../dev/drm2/i915
KMOD = i915kms
SRCS = \
	i915_debug.c \
	i915_dma.c \
	i915_drv.c \
	i915_gem.c \
	i915_gem_context.c \
	i915_gem_execbuffer.c \
	i915_gem_evict.c \
	i915_gem_gtt.c \
+	i915_gem_stolen.c \
	i915_gem_tiling.c \
	i915_irq.c \
	i915_suspend.c \
	intel_bios.c \
	intel_crt.c \
+	intel_ddi.c \
	intel_display.c \
	intel_dp.c \
	intel_fb.c \
	intel_hdmi.c \
	intel_iic.c \
	intel_lvds.c \
	intel_modes.c \
	intel_opregion.c \
	intel_overlay.c \
	intel_panel.c \
+	intel_pm.c \
	intel_ringbuffer.c \
	intel_sdvo.c \
	intel_sprite.c \
	intel_tv.c
.if ${MACHINE_CPUARCH} == "amd64"
SRCS += i915_ioc32.c
.endif
SRCS += \
	opt_acpi.h \
	opt_compat.h \
	opt_drm.h \
	opt_syscons.h \
	acpi_if.h \
	bus_if.h \
	fb_if.h \
	device_if.h \
	iicbb_if.h \
	iicbus_if.h \
	pci_if.h
.include <bsd.kmod.mk>
CWARNFLAGS.i915_debug.c= -Wno-unused-function
CWARNFLAGS.intel_lvds.c= -Wno-unused
CWARNFLAGS.intel_tv.c= -Wno-unused