Index: stable/11/sys/dev/drm2/drm_os_freebsd.h =================================================================== --- stable/11/sys/dev/drm2/drm_os_freebsd.h (revision 303688) +++ stable/11/sys/dev/drm2/drm_os_freebsd.h (revision 303689) @@ -1,700 +1,703 @@ /** * \file drm_os_freebsd.h * OS abstraction macros. */ #include __FBSDID("$FreeBSD$"); #ifndef _DRM_OS_FREEBSD_H_ #define _DRM_OS_FREEBSD_H_ #include #include #if _BYTE_ORDER == _BIG_ENDIAN #define __BIG_ENDIAN 4321 #else #define __LITTLE_ENDIAN 1234 #endif #ifdef __LP64__ #define BITS_PER_LONG 64 #else #define BITS_PER_LONG 32 #endif #ifndef __user #define __user #endif #ifndef __iomem #define __iomem #endif #ifndef __always_unused #define __always_unused #endif #ifndef __must_check #define __must_check #endif #ifndef __force #define __force #endif #ifndef uninitialized_var #define uninitialized_var(x) x #endif #define cpu_to_le16(x) htole16(x) #define le16_to_cpu(x) le16toh(x) #define cpu_to_le32(x) htole32(x) #define le32_to_cpu(x) le32toh(x) #define cpu_to_be16(x) htobe16(x) #define be16_to_cpu(x) be16toh(x) #define cpu_to_be32(x) htobe32(x) #define be32_to_cpu(x) be32toh(x) #define be32_to_cpup(x) be32toh(*x) typedef vm_paddr_t dma_addr_t; typedef vm_paddr_t resource_size_t; #define wait_queue_head_t atomic_t typedef uint64_t u64; typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; typedef int64_t s64; typedef int32_t s32; typedef int16_t s16; typedef int8_t s8; typedef uint16_t __le16; typedef uint32_t __le32; typedef uint64_t __le64; typedef uint16_t __be16; typedef uint32_t __be32; typedef uint64_t __be64; #define DRM_IRQ_ARGS void *arg typedef void irqreturn_t; #define IRQ_HANDLED /* nothing */ #define IRQ_NONE /* nothing */ #define __init #define __exit #define __read_mostly #define BUILD_BUG_ON(x) CTASSERT(!(x)) #define BUILD_BUG_ON_NOT_POWER_OF_2(x) #ifndef WARN #define WARN(condition, format, ...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ DRM_ERROR(format, ##__VA_ARGS__); \ unlikely(__ret_warn_on); \ }) #endif #define WARN_ONCE(condition, format, ...) 
\ WARN(condition, format, ##__VA_ARGS__) #define WARN_ON(cond) WARN(cond, "WARN ON: " #cond) #define WARN_ON_SMP(cond) WARN_ON(cond) #define BUG() panic("BUG") #define BUG_ON(cond) KASSERT(!(cond), ("BUG ON: " #cond " -> 0x%jx", (uintmax_t)(cond))) #define unlikely(x) __builtin_expect(!!(x), 0) #define likely(x) __builtin_expect(!!(x), 1) #define container_of(ptr, type, member) ({ \ __typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) #define KHZ2PICOS(a) (1000000000UL/(a)) #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) #define HZ hz #define DRM_HZ hz #define DRM_CURRENTPID curthread->td_proc->p_pid #define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0) #define udelay(usecs) DELAY(usecs) #define mdelay(msecs) do { int loops = (msecs); \ while (loops--) DELAY(1000); \ } while (0) #define DRM_UDELAY(udelay) DELAY(udelay) #define drm_msleep(x, msg) pause((msg), ((int64_t)(x)) * hz / 1000) #define DRM_MSLEEP(msecs) drm_msleep((msecs), "drm_msleep") #define get_seconds() time_second #define ioread8(addr) *(volatile uint8_t *)((char *)addr) #define ioread16(addr) *(volatile uint16_t *)((char *)addr) #define ioread32(addr) *(volatile uint32_t *)((char *)addr) #define iowrite8(data, addr) *(volatile uint8_t *)((char *)addr) = data; #define iowrite16(data, addr) *(volatile uint16_t *)((char *)addr) = data; #define iowrite32(data, addr) *(volatile uint32_t *)((char *)addr) = data; #define DRM_READ8(map, offset) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) #define DRM_READ16(map, offset) \ le16toh(*(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_READ32(map, offset) \ le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_READ64(map, offset) \ le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_WRITE8(map, offset, val) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = val #define DRM_WRITE16(map, offset, val) \ *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole16(val) #define DRM_WRITE32(map, offset, val) \ *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole32(val) #define DRM_WRITE64(map, offset, val) \ *(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole64(val) /* DRM_READMEMORYBARRIER() prevents reordering of reads. * DRM_WRITEMEMORYBARRIER() prevents reordering of writes. * DRM_MEMORYBARRIER() prevents reordering of reads and writes. */ #define DRM_READMEMORYBARRIER() rmb() #define DRM_WRITEMEMORYBARRIER() wmb() #define DRM_MEMORYBARRIER() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() #define smp_mb__before_atomic_inc() mb() #define smp_mb__after_atomic_inc() mb() #define barrier() __compiler_membar() #define do_div(a, b) ((a) /= (b)) #define div64_u64(a, b) ((a) / (b)) #define lower_32_bits(n) ((u32)(n)) #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) #define __set_bit(n, s) set_bit((n), (s)) #define __clear_bit(n, s) clear_bit((n), (s)) #define min_t(type, x, y) ({ \ type __min1 = (x); \ type __min2 = (y); \ __min1 < __min2 ? __min1 : __min2; }) #define max_t(type, x, y) ({ \ type __max1 = (x); \ type __max2 = (y); \ __max1 > __max2 ? 
__max1 : __max2; }) #define memset_io(a, b, c) memset((a), (b), (c)) #define memcpy_fromio(a, b, c) memcpy((a), (b), (c)) #define memcpy_toio(a, b, c) memcpy((a), (b), (c)) #define VERIFY_READ VM_PROT_READ #define VERIFY_WRITE VM_PROT_WRITE #define access_ok(prot, p, l) useracc((p), (l), (prot)) /* XXXKIB what is the right code for the FreeBSD ? */ /* kib@ used ENXIO here -- dumbbell@ */ #define EREMOTEIO EIO #define ERESTARTSYS 512 /* Same value as Linux. */ #define KTR_DRM KTR_DEV #define KTR_DRM_REG KTR_SPARE3 #define DRM_AGP_KERN struct agp_info #define DRM_AGP_MEM void #define PCI_VENDOR_ID_APPLE 0x106b #define PCI_VENDOR_ID_ASUSTEK 0x1043 #define PCI_VENDOR_ID_ATI 0x1002 #define PCI_VENDOR_ID_DELL 0x1028 #define PCI_VENDOR_ID_HP 0x103c #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 #define PCI_VENDOR_ID_SONY 0x104d #define PCI_VENDOR_ID_VIA 0x1106 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define DIV_ROUND_CLOSEST(n,d) (((n) + (d) / 2) / (d)) #define div_u64(n, d) ((n) / (d)) #define hweight32(i) bitcount32(i) static inline unsigned long roundup_pow_of_two(unsigned long x) { return (1UL << flsl(x - 1)); } /** * ror32 - rotate a 32-bit value right * @word: value to rotate * @shift: bits to roll * * Source: include/linux/bitops.h */ static inline uint32_t ror32(uint32_t word, unsigned int shift) { return (word >> shift) | (word << (32 - shift)); } #define IS_ALIGNED(x, y) (((x) & ((y) - 1)) == 0) #define round_down(x, y) rounddown2((x), (y)) #define round_up(x, y) roundup2((x), (y)) #define get_unaligned(ptr) \ ({ __typeof__(*(ptr)) __tmp; \ memcpy(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) #if _BYTE_ORDER == _LITTLE_ENDIAN /* Taken from linux/include/linux/unaligned/le_struct.h. */ struct __una_u32 { u32 x; } __packed; static inline u32 __get_unaligned_cpu32(const void *p) { const struct __una_u32 *ptr = (const struct __una_u32 *)p; return (ptr->x); } static inline u32 get_unaligned_le32(const void *p) { return (__get_unaligned_cpu32((const u8 *)p)); } #else /* Taken from linux/include/linux/unaligned/le_byteshift.h. */ static inline u32 __get_unaligned_le32(const u8 *p) { return (p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24); } static inline u32 get_unaligned_le32(const void *p) { return (__get_unaligned_le32((const u8 *)p)); } #endif static inline unsigned long ilog2(unsigned long x) { return (flsl(x) - 1); } static inline int64_t abs64(int64_t x) { return (x < 0 ? -x : x); } int64_t timeval_to_ns(const struct timeval *tv); struct timeval ns_to_timeval(const int64_t nsec); #define PAGE_ALIGN(addr) round_page(addr) #define page_to_phys(x) VM_PAGE_TO_PHYS(x) #define offset_in_page(x) ((x) & PAGE_MASK) #define drm_get_device_from_kdev(_kdev) (((struct drm_minor *)(_kdev)->si_drv1)->dev) #define DRM_IOC_VOID IOC_VOID #define DRM_IOC_READ IOC_OUT #define DRM_IOC_WRITE IOC_IN #define DRM_IOC_READWRITE IOC_INOUT #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) static inline long __copy_to_user(void __user *to, const void *from, unsigned long n) { return (copyout(from, to, n) != 0 ? n : 0); } #define copy_to_user(to, from, n) __copy_to_user((to), (from), (n)) static inline int __put_user(size_t size, void *ptr, void *x) { size = copy_to_user(ptr, x, size); return (size ? -EFAULT : size); } #define put_user(x, ptr) __put_user(sizeof(*ptr), (ptr), &(x)) static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { return ((copyin(__DECONST(void *, from), to, n) != 0 ? 
n : 0)); } #define copy_from_user(to, from, n) __copy_from_user((to), (from), (n)) static inline int __get_user(size_t size, const void *ptr, void *x) { size = copy_from_user(x, ptr, size); return (size ? -EFAULT : size); } #define get_user(x, ptr) __get_user(sizeof(*ptr), (ptr), &(x)) static inline int __copy_to_user_inatomic(void __user *to, const void *from, unsigned n) { return (copyout_nofault(from, to, n) != 0 ? n : 0); } #define __copy_to_user_inatomic_nocache(to, from, n) \ __copy_to_user_inatomic((to), (from), (n)) static inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { /* * XXXKIB. Equivalent Linux function is implemented using * MOVNTI for aligned moves. For unaligned head and tail, * normal move is performed. As such, it is not incorrect, if * only somewhat slower, to use normal copyin. All uses * except shmem_pwrite_fast() have the destination mapped WC. */ return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0)); } #define __copy_from_user_inatomic_nocache(to, from, n) \ __copy_from_user_inatomic((to), (from), (n)) static inline int fault_in_multipages_readable(const char __user *uaddr, int size) { char c; int ret = 0; const char __user *end = uaddr + size - 1; if (unlikely(size == 0)) return ret; while (uaddr <= end) { ret = -copyin(uaddr, &c, 1); if (ret != 0) return -EFAULT; uaddr += PAGE_SIZE; } /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & ~PAGE_MASK) == ((unsigned long)end & ~PAGE_MASK)) { ret = -copyin(end, &c, 1); } return ret; } static inline int fault_in_multipages_writeable(char __user *uaddr, int size) { int ret = 0; char __user *end = uaddr + size - 1; if (unlikely(size == 0)) return ret; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ while (uaddr <= end) { ret = subyte(uaddr, 0); if (ret != 0) return -EFAULT; uaddr += PAGE_SIZE; } /* Check whether the range spilled into the next page. 
 */
	if (((unsigned long)uaddr & ~PAGE_MASK) ==
	    ((unsigned long)end & ~PAGE_MASK))
		ret = subyte(end, 0);
	return ret;
}

enum __drm_capabilities {
	CAP_SYS_ADMIN
};

static inline bool
capable(enum __drm_capabilities cap)
{

	switch (cap) {
	case CAP_SYS_ADMIN:
		return DRM_SUSER(curthread);
+	default:
+		panic("%s: unhandled capability: %0x", __func__, cap);
+		return (false);
	}
}

#define	to_user_ptr(x)		((void *)(uintptr_t)(x))
#define	sigemptyset(set)	SIGEMPTYSET(set)
#define	sigaddset(set, sig)	SIGADDSET(set, sig)

#define DRM_LOCK(dev)		sx_xlock(&(dev)->dev_struct_lock)
#define DRM_UNLOCK(dev) 	sx_xunlock(&(dev)->dev_struct_lock)

extern unsigned long drm_linux_timer_hz_mask;
#define jiffies			ticks
#define	jiffies_to_msecs(x)	(((int64_t)(x)) * 1000 / hz)
#define	msecs_to_jiffies(x)	(((int64_t)(x)) * hz / 1000)
#define	timespec_to_jiffies(x)	(((x)->tv_sec * 1000000 + (x)->tv_nsec) * hz / 1000000)
#define	time_after(a,b)		((long)(b) - (long)(a) < 0)
#define	time_after_eq(a,b)	((long)(b) - (long)(a) <= 0)
#define	round_jiffies(j)	((unsigned long)(((j) + drm_linux_timer_hz_mask) & ~drm_linux_timer_hz_mask))
#define	round_jiffies_up(j)		round_jiffies(j) /* TODO */
#define	round_jiffies_up_relative(j)	round_jiffies_up(j) /* TODO */

#define	getrawmonotonic(ts)	getnanouptime(ts)

#define	wake_up(queue)				wakeup_one((void *)queue)
#define	wake_up_interruptible(queue)		wakeup_one((void *)queue)
#define	wake_up_all(queue)			wakeup((void *)queue)
#define	wake_up_interruptible_all(queue)	wakeup((void *)queue)

struct completion {
	unsigned int done;
	struct mtx lock;
};

#define	INIT_COMPLETION(c) ((c).done = 0);

static inline void
init_completion(struct completion *c)
{

	mtx_init(&c->lock, "drmcompl", NULL, MTX_DEF);
	c->done = 0;
}

static inline void
free_completion(struct completion *c)
{

	mtx_destroy(&c->lock);
}

static inline void
complete_all(struct completion *c)
{

	mtx_lock(&c->lock);
	c->done++;
	mtx_unlock(&c->lock);
	wakeup(c);
}

static inline long
wait_for_completion_interruptible_timeout(struct completion *c,
    unsigned long timeout)
{
	unsigned long start_jiffies, elapsed_jiffies;
	bool timeout_expired = false, awakened = false;
	long ret = timeout;

	start_jiffies = ticks;

	mtx_lock(&c->lock);
	while (c->done == 0 && !timeout_expired) {
		ret = -msleep(c, &c->lock, PCATCH, "drmwco", timeout);
		switch(ret) {
		case -EWOULDBLOCK:
			timeout_expired = true;
			ret = 0;
			break;
		case -EINTR:
		case -ERESTART:
			ret = -ERESTARTSYS;
			break;
		case 0:
			awakened = true;
			break;
		}
	}
	mtx_unlock(&c->lock);

	if (awakened) {
		elapsed_jiffies = ticks - start_jiffies;
		ret = timeout > elapsed_jiffies ?
timeout - elapsed_jiffies : 1; } return (ret); } MALLOC_DECLARE(DRM_MEM_DMA); MALLOC_DECLARE(DRM_MEM_SAREA); MALLOC_DECLARE(DRM_MEM_DRIVER); MALLOC_DECLARE(DRM_MEM_MAGIC); MALLOC_DECLARE(DRM_MEM_MINOR); MALLOC_DECLARE(DRM_MEM_IOCTLS); MALLOC_DECLARE(DRM_MEM_MAPS); MALLOC_DECLARE(DRM_MEM_BUFS); MALLOC_DECLARE(DRM_MEM_SEGS); MALLOC_DECLARE(DRM_MEM_PAGES); MALLOC_DECLARE(DRM_MEM_FILES); MALLOC_DECLARE(DRM_MEM_QUEUES); MALLOC_DECLARE(DRM_MEM_CMDS); MALLOC_DECLARE(DRM_MEM_MAPPINGS); MALLOC_DECLARE(DRM_MEM_BUFLISTS); MALLOC_DECLARE(DRM_MEM_AGPLISTS); MALLOC_DECLARE(DRM_MEM_CTXBITMAP); MALLOC_DECLARE(DRM_MEM_SGLISTS); MALLOC_DECLARE(DRM_MEM_MM); MALLOC_DECLARE(DRM_MEM_HASHTAB); MALLOC_DECLARE(DRM_MEM_KMS); MALLOC_DECLARE(DRM_MEM_VBLANK); #define simple_strtol(a, b, c) strtol((a), (b), (c)) typedef struct drm_pci_id_list { int vendor; int device; long driver_private; char *name; } drm_pci_id_list_t; #ifdef __i386__ #define CONFIG_X86 1 #endif #ifdef __amd64__ #define CONFIG_X86 1 #define CONFIG_X86_64 1 #endif #ifdef __ia64__ #define CONFIG_IA64 1 #endif #if defined(__i386__) || defined(__amd64__) #define CONFIG_ACPI #define CONFIG_DRM_I915_KMS #undef CONFIG_INTEL_IOMMU #endif #ifdef COMPAT_FREEBSD32 #define CONFIG_COMPAT #endif #define CONFIG_AGP 1 #define CONFIG_MTRR 1 #define CONFIG_FB 1 extern const char *fb_mode_option; #undef CONFIG_DEBUG_FS #undef CONFIG_VGA_CONSOLE #define EXPORT_SYMBOL(x) #define EXPORT_SYMBOL_GPL(x) #define MODULE_AUTHOR(author) #define MODULE_DESCRIPTION(desc) #define MODULE_LICENSE(license) #define MODULE_PARM_DESC(name, desc) #define MODULE_DEVICE_TABLE(name, list) #define module_param_named(name, var, type, perm) #define printk printf #define pr_err DRM_ERROR #define pr_warn DRM_WARNING #define pr_warn_once DRM_WARNING #define KERN_DEBUG "" /* I2C compatibility. 
*/ #define I2C_M_RD IIC_M_RD #define I2C_M_WR IIC_M_WR #define I2C_M_NOSTART IIC_M_NOSTART struct fb_info * framebuffer_alloc(void); void framebuffer_release(struct fb_info *info); #define console_lock() #define console_unlock() #define console_trylock() true #define PM_EVENT_SUSPEND 0x0002 #define PM_EVENT_QUIESCE 0x0008 #define PM_EVENT_PRETHAW PM_EVENT_QUIESCE typedef struct pm_message { int event; } pm_message_t; static inline int pci_read_config_byte(device_t kdev, int where, u8 *val) { *val = (u8)pci_read_config(kdev, where, 1); return (0); } static inline int pci_write_config_byte(device_t kdev, int where, u8 val) { pci_write_config(kdev, where, val, 1); return (0); } static inline int pci_read_config_word(device_t kdev, int where, uint16_t *val) { *val = (uint16_t)pci_read_config(kdev, where, 2); return (0); } static inline int pci_write_config_word(device_t kdev, int where, uint16_t val) { pci_write_config(kdev, where, val, 2); return (0); } static inline int pci_read_config_dword(device_t kdev, int where, uint32_t *val) { *val = (uint32_t)pci_read_config(kdev, where, 4); return (0); } static inline int pci_write_config_dword(device_t kdev, int where, uint32_t val) { pci_write_config(kdev, where, val, 4); return (0); } static inline void on_each_cpu(void callback(void *data), void *data, int wait) { smp_rendezvous(NULL, callback, NULL, data); } void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii); #define KIB_NOTYET() \ do { \ if (drm_debug && drm_notyet) \ printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \ } while (0) #endif /* _DRM_OS_FREEBSD_H_ */ Index: stable/11/sys/dev/drm2/i915/i915_drv.h =================================================================== --- stable/11/sys/dev/drm2/i915/i915_drv.h (revision 303688) +++ stable/11/sys/dev/drm2/i915/i915_drv.h (revision 303689) @@ -1,1757 +1,1754 @@ /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- */ /* * * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include __FBSDID("$FreeBSD$"); #ifndef _I915_DRV_H_ #define _I915_DRV_H_ #include #include #include #include #include /* General customization: */ #define DRIVER_AUTHOR "Tungsten Graphics, Inc." 
#define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" #define DRIVER_DATE "20080730" MALLOC_DECLARE(DRM_I915_GEM); enum pipe { PIPE_A = 0, PIPE_B, PIPE_C, I915_MAX_PIPES }; #define pipe_name(p) ((p) + 'A') enum transcoder { TRANSCODER_A = 0, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP = 0xF, }; #define transcoder_name(t) ((t) + 'A') enum plane { PLANE_A = 0, PLANE_B, PLANE_C, }; #define plane_name(p) ((p) + 'A') enum port { PORT_A = 0, PORT_B, PORT_C, PORT_D, PORT_E, I915_MAX_PORTS }; #define port_name(p) ((p) + 'A') #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ if ((intel_encoder)->base.crtc == (__crtc)) struct intel_pch_pll { int refcount; /* count of number of CRTCs sharing this PLL */ int active; /* count of number of active CRTCs (i.e. DPMS on) */ bool on; /* is the PLL actually active? Disabled during modeset */ int pll_reg; int fp0_reg; int fp1_reg; }; #define I915_NUM_PLLS 2 struct intel_ddi_plls { int spll_refcount; int wrpll1_refcount; int wrpll2_refcount; }; /* Interface history: * * 1.1: Original. * 1.2: Add Power Management * 1.3: Add vblank support * 1.4: Fix cmdbuffer path, add heap destroy * 1.5: Add vblank pipe configuration * 1.6: - New ioctl for scheduling buffer swaps on vertical blank * - Support vertical blank on secondary display pipe */ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 6 #define DRIVER_PATCHLEVEL 0 #define WATCH_COHERENCY 0 #define WATCH_LISTS 0 #define WATCH_GTT 0 #define I915_GEM_PHYS_CURSOR_0 1 #define I915_GEM_PHYS_CURSOR_1 2 #define I915_GEM_PHYS_OVERLAY_REGS 3 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) struct drm_i915_gem_phys_object { int id; drm_dma_handle_t *handle; struct drm_i915_gem_object *cur_obj; }; struct opregion_header; struct opregion_acpi; struct opregion_swsci; struct opregion_asle; struct drm_i915_private; struct intel_opregion { struct opregion_header __iomem *header; struct opregion_acpi __iomem *acpi; struct opregion_swsci __iomem *swsci; struct opregion_asle __iomem *asle; void __iomem *vbt; u32 __iomem *lid_state; }; #define OPREGION_SIZE (8*1024) struct intel_overlay; struct intel_overlay_error_state; struct drm_i915_master_private { drm_local_map_t *sarea; struct _drm_i915_sarea *sarea_priv; }; #define I915_FENCE_REG_NONE -1 #define I915_MAX_NUM_FENCES 16 /* 16 fences + sign bit for FENCE_REG_NONE */ #define I915_MAX_NUM_FENCE_BITS 5 struct drm_i915_fence_reg { struct list_head lru_list; struct drm_i915_gem_object *obj; int pin_count; }; struct sdvo_device_mapping { u8 initialized; u8 dvo_port; u8 slave_addr; u8 dvo_wiring; u8 i2c_pin; u8 ddc_pin; }; struct intel_display_error_state; struct drm_i915_error_state { u_int ref; u32 eir; u32 pgtbl_er; u32 ier; u32 ccid; u32 derrmr; u32 forcewake; bool waiting[I915_NUM_RINGS]; u32 pipestat[I915_MAX_PIPES]; u32 tail[I915_NUM_RINGS]; u32 head[I915_NUM_RINGS]; u32 ctl[I915_NUM_RINGS]; u32 ipeir[I915_NUM_RINGS]; u32 ipehr[I915_NUM_RINGS]; u32 instdone[I915_NUM_RINGS]; u32 acthd[I915_NUM_RINGS]; u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1]; u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ /* our own tracking of ring head and tail */ u32 cpu_ring_head[I915_NUM_RINGS]; u32 cpu_ring_tail[I915_NUM_RINGS]; u32 error; /* gen6+ */ u32 err_int; /* gen7 */ u32 
instpm[I915_NUM_RINGS]; u32 instps[I915_NUM_RINGS]; u32 extra_instdone[I915_NUM_INSTDONE_REG]; u32 seqno[I915_NUM_RINGS]; u64 bbaddr; u32 fault_reg[I915_NUM_RINGS]; u32 done_reg; u32 faddr[I915_NUM_RINGS]; u64 fence[I915_MAX_NUM_FENCES]; struct timeval time; struct drm_i915_error_ring { struct drm_i915_error_object { int page_count; u32 gtt_offset; u32 *pages[0]; } *ringbuffer, *batchbuffer; struct drm_i915_error_request { long jiffies; u32 seqno; u32 tail; } *requests; int num_requests; } ring[I915_NUM_RINGS]; struct drm_i915_error_buffer { u32 size; u32 name; u32 rseqno, wseqno; u32 gtt_offset; u32 read_domains; u32 write_domain; s32 fence_reg:I915_MAX_NUM_FENCE_BITS; s32 pinned:2; u32 tiling:2; u32 dirty:1; u32 purgeable:1; s32 ring:4; u32 cache_level:2; } *active_bo, *pinned_bo; u32 active_bo_count, pinned_bo_count; struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; }; struct drm_i915_display_funcs { bool (*fbc_enabled)(struct drm_device *dev); void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); void (*disable_fbc)(struct drm_device *dev); int (*get_display_clock_speed)(struct drm_device *dev); int (*get_fifo_size)(struct drm_device *dev, int plane); void (*update_wm)(struct drm_device *dev); void (*update_sprite_wm)(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size); void (*update_linetime_wm)(struct drm_device *dev, int pipe, struct drm_display_mode *mode); void (*modeset_global_resources)(struct drm_device *dev); int (*crtc_mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb); void (*crtc_enable)(struct drm_crtc *crtc); void (*crtc_disable)(struct drm_crtc *crtc); void (*off)(struct drm_crtc *crtc); void (*write_eld)(struct drm_connector *connector, struct drm_crtc *crtc); void (*fdi_link_train)(struct drm_crtc *crtc); void (*init_clock_gating)(struct drm_device *dev); int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj); int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ /* display clock increase/decrease */ /* pll clock increase/decrease */ }; struct drm_i915_gt_funcs { void (*force_wake_get)(struct drm_i915_private *dev_priv); void (*force_wake_put)(struct drm_i915_private *dev_priv); }; #define DEV_INFO_FLAGS \ DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \ DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \ DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \ DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \ DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \ DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \ DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \ DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \ DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \ DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \ DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \ DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \ DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \ DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \ DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \ DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \ DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \ DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \ DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \ DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \ DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \ DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \ DEV_INFO_FLAG(has_llc) struct intel_device_info { 
u8 gen; u8 is_mobile:1; u8 is_i85x:1; u8 is_i915g:1; u8 is_i945gm:1; u8 is_g33:1; u8 need_gfx_hws:1; u8 is_g4x:1; u8 is_pineview:1; u8 is_broadwater:1; u8 is_crestline:1; u8 is_ivybridge:1; u8 is_valleyview:1; u8 has_force_wake:1; u8 is_haswell:1; u8 has_fbc:1; u8 has_pipe_cxsr:1; u8 has_hotplug:1; u8 cursor_needs_physical:1; u8 has_overlay:1; u8 overlay_needs_physical:1; u8 supports_tv:1; u8 has_bsd_ring:1; u8 has_blt_ring:1; u8 has_llc:1; }; #define I915_PPGTT_PD_ENTRIES 512 #define I915_PPGTT_PT_ENTRIES 1024 struct i915_hw_ppgtt { struct drm_device *dev; unsigned num_pd_entries; vm_page_t *pt_pages; uint32_t pd_offset; vm_paddr_t *pt_dma_addr; vm_paddr_t scratch_page_dma_addr; }; /* This must match up with the value previously used for execbuf2.rsvd1. */ #define DEFAULT_CONTEXT_ID 0 struct i915_hw_context { uint32_t id; bool is_initialized; struct drm_i915_file_private *file_priv; struct intel_ring_buffer *ring; struct drm_i915_gem_object *obj; }; enum no_fbc_reason { FBC_NO_OUTPUT, /* no outputs enabled to compress */ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ FBC_MODE_TOO_LARGE, /* mode too large for compression */ FBC_BAD_PLANE, /* fbc not supported on plane */ FBC_NOT_TILED, /* buffer not tiled */ FBC_MULTIPLE_PIPES, /* more than one pipe active */ FBC_MODULE_PARAM, }; enum intel_pch { PCH_NONE = 0, /* No PCH present */ PCH_IBX, /* Ibexpeak PCH */ PCH_CPT, /* Cougarpoint PCH */ PCH_LPT, /* Lynxpoint PCH */ }; enum intel_sbi_destination { SBI_ICLK, SBI_MPHY, }; #define QUIRK_PIPEA_FORCE (1<<0) #define QUIRK_LVDS_SSC_DISABLE (1<<1) #define QUIRK_INVERT_BRIGHTNESS (1<<2) struct intel_fbdev; struct intel_fbc_work; struct intel_gmbus { device_t gmbus_bridge; device_t gmbus; device_t bbbus_bridge; device_t bbbus; u32 force_bit; u32 reg0; u32 gpio_reg; struct drm_i915_private *dev_priv; }; struct i915_suspend_saved_registers { u8 saveLBB; u32 saveDSPACNTR; u32 saveDSPBCNTR; u32 saveDSPARB; u32 savePIPEACONF; u32 savePIPEBCONF; u32 savePIPEASRC; u32 savePIPEBSRC; u32 saveFPA0; u32 saveFPA1; u32 saveDPLL_A; u32 saveDPLL_A_MD; u32 saveHTOTAL_A; u32 saveHBLANK_A; u32 saveHSYNC_A; u32 saveVTOTAL_A; u32 saveVBLANK_A; u32 saveVSYNC_A; u32 saveBCLRPAT_A; u32 saveTRANSACONF; u32 saveTRANS_HTOTAL_A; u32 saveTRANS_HBLANK_A; u32 saveTRANS_HSYNC_A; u32 saveTRANS_VTOTAL_A; u32 saveTRANS_VBLANK_A; u32 saveTRANS_VSYNC_A; u32 savePIPEASTAT; u32 saveDSPASTRIDE; u32 saveDSPASIZE; u32 saveDSPAPOS; u32 saveDSPAADDR; u32 saveDSPASURF; u32 saveDSPATILEOFF; u32 savePFIT_PGM_RATIOS; u32 saveBLC_HIST_CTL; u32 saveBLC_PWM_CTL; u32 saveBLC_PWM_CTL2; u32 saveBLC_CPU_PWM_CTL; u32 saveBLC_CPU_PWM_CTL2; u32 saveFPB0; u32 saveFPB1; u32 saveDPLL_B; u32 saveDPLL_B_MD; u32 saveHTOTAL_B; u32 saveHBLANK_B; u32 saveHSYNC_B; u32 saveVTOTAL_B; u32 saveVBLANK_B; u32 saveVSYNC_B; u32 saveBCLRPAT_B; u32 saveTRANSBCONF; u32 saveTRANS_HTOTAL_B; u32 saveTRANS_HBLANK_B; u32 saveTRANS_HSYNC_B; u32 saveTRANS_VTOTAL_B; u32 saveTRANS_VBLANK_B; u32 saveTRANS_VSYNC_B; u32 savePIPEBSTAT; u32 saveDSPBSTRIDE; u32 saveDSPBSIZE; u32 saveDSPBPOS; u32 saveDSPBADDR; u32 saveDSPBSURF; u32 saveDSPBTILEOFF; u32 saveVGA0; u32 saveVGA1; u32 saveVGA_PD; u32 saveVGACNTRL; u32 saveADPA; u32 saveLVDS; u32 savePP_ON_DELAYS; u32 savePP_OFF_DELAYS; u32 saveDVOA; u32 saveDVOB; u32 saveDVOC; u32 savePP_ON; u32 savePP_OFF; u32 savePP_CONTROL; u32 savePP_DIVISOR; u32 savePFIT_CONTROL; u32 save_palette_a[256]; u32 save_palette_b[256]; u32 saveDPFC_CB_BASE; u32 saveFBC_CFB_BASE; 
u32 saveFBC_LL_BASE; u32 saveFBC_CONTROL; u32 saveFBC_CONTROL2; u32 saveIER; u32 saveIIR; u32 saveIMR; u32 saveDEIER; u32 saveDEIMR; u32 saveGTIER; u32 saveGTIMR; u32 saveFDI_RXA_IMR; u32 saveFDI_RXB_IMR; u32 saveCACHE_MODE_0; u32 saveMI_ARB_STATE; u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF2[3]; u8 saveMSR; u8 saveSR[8]; u8 saveGR[25]; u8 saveAR_INDEX; u8 saveAR[21]; u8 saveDACMASK; u8 saveCR[37]; uint64_t saveFENCE[I915_MAX_NUM_FENCES]; u32 saveCURACNTR; u32 saveCURAPOS; u32 saveCURABASE; u32 saveCURBCNTR; u32 saveCURBPOS; u32 saveCURBBASE; u32 saveCURSIZE; u32 saveDP_B; u32 saveDP_C; u32 saveDP_D; u32 savePIPEA_GMCH_DATA_M; u32 savePIPEB_GMCH_DATA_M; u32 savePIPEA_GMCH_DATA_N; u32 savePIPEB_GMCH_DATA_N; u32 savePIPEA_DP_LINK_M; u32 savePIPEB_DP_LINK_M; u32 savePIPEA_DP_LINK_N; u32 savePIPEB_DP_LINK_N; u32 saveFDI_RXA_CTL; u32 saveFDI_TXA_CTL; u32 saveFDI_RXB_CTL; u32 saveFDI_TXB_CTL; u32 savePFA_CTL_1; u32 savePFB_CTL_1; u32 savePFA_WIN_SZ; u32 savePFB_WIN_SZ; u32 savePFA_WIN_POS; u32 savePFB_WIN_POS; u32 savePCH_DREF_CONTROL; u32 saveDISP_ARB_CTL; u32 savePIPEA_DATA_M1; u32 savePIPEA_DATA_N1; u32 savePIPEA_LINK_M1; u32 savePIPEA_LINK_N1; u32 savePIPEB_DATA_M1; u32 savePIPEB_DATA_N1; u32 savePIPEB_LINK_M1; u32 savePIPEB_LINK_N1; u32 saveMCHBAR_RENDER_STANDBY; u32 savePCH_PORT_HOTPLUG; }; struct intel_gen6_power_mgmt { struct task work; u32 pm_iir; /* lock - irqsave spinlock that protectects the work_struct and * pm_iir. */ struct mtx lock; /* The below variables an all the rps hw state are protected by * dev->struct mutext. */ u8 cur_delay; u8 min_delay; u8 max_delay; struct timeout_task delayed_resume_work; /* * Protects RPS/RC6 register access and PCU communication. * Must be taken after struct_mutex if nested. */ struct sx hw_lock; }; struct intel_ilk_power_mgmt { u8 cur_delay; u8 min_delay; u8 max_delay; u8 fmax; u8 fstart; u64 last_count1; unsigned long last_time1; unsigned long chipset_power; u64 last_count2; struct timespec last_time2; unsigned long gfx_power; u8 corr; int c_m; int r_t; struct drm_i915_gem_object *pwrctx; struct drm_i915_gem_object *renderctx; }; struct i915_dri1_state { unsigned allow_batchbuffer : 1; u32 __iomem *gfx_hws_cpu_addr; unsigned int cpp; int back_offset; int front_offset; int current_page; int page_flipping; uint32_t counter; }; struct intel_l3_parity { u32 *remap_info; struct task error_work; }; typedef struct drm_i915_private { struct drm_device *dev; const struct intel_device_info *info; int relative_constants_mode; /* FIXME Linux<->FreeBSD: "void *regs" on Linux. */ drm_local_map_t *mmio_map; struct drm_i915_gt_funcs gt; /** gt_fifo_count and the subsequent register write are synchronized * with dev->struct_mutex. */ unsigned gt_fifo_count; /** forcewake_count is protected by gt_lock */ unsigned forcewake_count; /** gt_lock is also taken in irq contexts. */ struct mtx gt_lock; struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; /** gmbus_mutex protects against concurrent usage of the single hw gmbus * controller on different i2c buses. */ struct sx gmbus_mutex; /** * Base address of the gmbus and gpio block. 
*/ uint32_t gpio_mmio_base; device_t bridge_dev; struct intel_ring_buffer ring[I915_NUM_RINGS]; uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; int mch_res_rid; struct resource *mch_res; atomic_t irq_received; /* protects the irq masks */ struct mtx irq_lock; /* DPIO indirect register protection */ struct sx dpio_lock; /** Cached value of IMR to avoid reads in updating the bitfield */ u32 pipestat[2]; u32 irq_mask; u32 gt_irq_mask; u32 pch_irq_mask; u32 hotplug_supported_mask; struct task hotplug_work; int num_pipe; int num_pch_pll; /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) struct callout hangcheck_timer; int hangcheck_count; uint32_t last_acthd[I915_NUM_RINGS]; uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; unsigned int stop_rings; unsigned long cfb_size; unsigned int cfb_fb; enum plane cfb_plane; int cfb_y; struct intel_fbc_work *fbc_work; struct intel_opregion opregion; /* overlay */ struct intel_overlay *overlay; bool sprite_scaling_enabled; /* LVDS info */ int backlight_level; /* restore backlight to this value */ bool backlight_enabled; struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ /* Feature bits from the VBIOS */ unsigned int int_tv_support:1; unsigned int lvds_dither:1; unsigned int lvds_vbt:1; unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; unsigned int display_clock_mode:1; unsigned int fdi_rx_polarity_inverted:1; int lvds_ssc_freq; unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ unsigned int lvds_val; /* used for checking LVDS channel mode */ struct { int rate; int lanes; int preemphasis; int vswing; bool initialized; bool support; int bpp; struct edp_power_seq pps; } edp; bool no_aux_handshake; int crt_ddc_pin; struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ unsigned int fsb_freq, mem_freq, is_ddr3; struct mtx error_lock; /* Protected by dev->error_lock. */ struct drm_i915_error_state *first_error; struct task error_work; struct completion error_completion; struct taskqueue *wq; /* Display functions */ struct drm_i915_display_funcs display; /* PCH chipset type */ enum intel_pch pch_type; unsigned short pch_id; unsigned long quirks; /* Register state */ bool modeset_on_lid; struct { /** Bridge to intel-gtt-ko */ struct intel_gtt *gtt; /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; /** Memory allocator for GTT */ struct drm_mm gtt_space; /** List of all objects in gtt_space. Used to restore gtt * mappings on resume */ struct list_head bound_list; /** * List of objects which are not bound to the GTT (thus * are idle and not used by the GPU) but still have * (presumably uncached) pages still attached. */ struct list_head unbound_list; /** Usable portion of the GTT for GEM */ unsigned long gtt_start; unsigned long gtt_mappable_end; unsigned long gtt_end; unsigned long stolen_base; /* limited to low memory (32-bit) */ #ifdef __linux__ struct io_mapping *gtt_mapping; #endif vm_paddr_t gtt_base_addr; int gtt_mtrr; /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; eventhandler_tag inactive_shrinker; bool shrinker_no_lock_stealing; /** * List of objects currently involved in rendering. 
* * Includes buffers having the contents of their GPU caches * flushed, not necessarily primitives. last_rendering_seqno * represents when the rendering involved will be completed. * * A reference is held on the buffer while on this list. */ struct list_head active_list; /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. * * last_rendering_seqno is 0 while an object is in this list. * * A reference is not held on the buffer while on this list, * as merely being GTT-bound shouldn't prevent its being * freed, and we'll pull it off the list in the free path. */ struct list_head inactive_list; /** LRU list of objects with fence regs on them. */ struct list_head fence_list; /** * We leave the user IRQ off as much as possible, * but this means that requests will finish and never * be retired once the system goes idle. Set a timer to * fire periodically while the ring is running. When it * fires, go retire requests. */ struct timeout_task retire_work; /** * Are we in a non-interruptible section of code like * modesetting? */ bool interruptible; /** * Flag if the X Server, and thus DRM, is not currently in * control of the device. * * This is set between LeaveVT and EnterVT. It needs to be * replaced with a semaphore. It also needs to be * transitioned away from for kernel modesetting. */ int suspended; /** * Flag if the hardware appears to be wedged. * * This is set when attempts to idle the device timeout. * It prevents command submission from occurring and makes * every pending request fail */ atomic_t wedged; /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; /** Bit 6 swizzling required for Y tiling */ uint32_t bit_6_swizzle_y; /* storage for physical objects */ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; /* accounting, useful for userland debugging */ size_t gtt_total; size_t mappable_gtt_total; size_t object_memory; u32 object_count; } mm; /* Kernel Modesetting */ struct sdvo_device_mapping sdvo_mappings[2]; /* indicate whether the LVDS_BORDER should be enabled or not */ unsigned int lvds_border_bits; /* Panel fitter placement and size for Ironlake+ */ u32 pch_pf_pos, pch_pf_size; struct drm_crtc *plane_to_crtc_mapping[3]; struct drm_crtc *pipe_to_crtc_mapping[3]; wait_queue_head_t pending_flip_queue; struct intel_pch_pll pch_plls[I915_NUM_PLLS]; struct intel_ddi_plls ddi_plls; /* Reclocking support */ bool render_reclock_avail; bool lvds_downclock_avail; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; u16 orig_clock; int child_dev_num; struct child_device_config *child_dev; bool mchbar_need_disable; struct intel_l3_parity l3_parity; /* gen6+ rps state */ struct intel_gen6_power_mgmt rps; /* ilk-only ips/rps state. Everything in here is protected by the global * mchdev_lock in intel_pm.c */ struct intel_ilk_power_mgmt ips; enum no_fbc_reason no_fbc_reason; struct drm_mm_node *compressed_fb; struct drm_mm_node *compressed_llb; unsigned long last_gpu_reset; /* list of fbdev register on this device */ struct intel_fbdev *fbdev; /* * The console may be contended at resume, but we don't * want it to block on it. */ struct task console_resume_work; struct backlight_device *backlight; struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; bool hw_contexts_disabled; uint32_t hw_context_size; u32 fdi_rx_config; struct i915_suspend_saved_registers regfile; /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! 
*/ struct i915_dri1_state dri1; } drm_i915_private_t; /* Iterate over initialised rings */ #define for_each_ring(ring__, dev_priv__, i__) \ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) enum hdmi_force_audio { HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ HDMI_AUDIO_OFF, /* force turn off HDMI audio */ HDMI_AUDIO_AUTO, /* trust EDID */ HDMI_AUDIO_ON, /* force turn on HDMI audio */ }; enum i915_cache_level { I915_CACHE_NONE = 0, I915_CACHE_LLC, I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ }; struct drm_i915_gem_object_ops { /* Interface between the GEM object and its backing storage. * get_pages() is called once prior to the use of the associated set * of pages before to binding them into the GTT, and put_pages() is * called after we no longer need them. As we expect there to be * associated cost with migrating pages between the backing storage * and making them available for the GPU (e.g. clflush), we may hold * onto the pages after they are no longer referenced by the GPU * in case they may be used again shortly (for example migrating the * pages to a different memory domain within the GTT). put_pages() * will therefore most likely be called when the object itself is * being released or under memory pressure (where we attempt to * reap pages for the shrinker). */ int (*get_pages)(struct drm_i915_gem_object *); void (*put_pages)(struct drm_i915_gem_object *); }; struct drm_i915_gem_object { struct drm_gem_object base; const struct drm_i915_gem_object_ops *ops; /** Current space allocated to this object in the GTT, if any. */ struct drm_mm_node *gtt_space; struct list_head gtt_list; /** This object's place on the active/inactive lists */ struct list_head ring_list; struct list_head mm_list; /** This object's place in the batchbuffer or on the eviction list */ struct list_head exec_list; /** * This is set if the object is on the active lists (has pending * rendering and so a non-zero seqno), and is not set if it i s on * inactive (ready to be unbound) list. */ unsigned int active:1; /** * This is set if the object has been written to since last bound * to the GTT */ unsigned int dirty:1; /** * Fence register bits (if any) for this object. Will be set * as needed when mapped into the GTT. * Protected by dev->struct_mutex. */ signed int fence_reg:I915_MAX_NUM_FENCE_BITS; /** * Advice: are the backing pages purgeable? */ unsigned int madv:2; /** * Current tiling mode for the object. */ unsigned int tiling_mode:2; /** * Whether the tiling parameters for the currently associated fence * register have changed. Note that for the purposes of tracking * tiling changes we also treat the unfenced register, the register * slot that the object occupies whilst it executes a fenced * command (such as BLT on gen2/3), as a "fence". */ unsigned int fence_dirty:1; /** How many users have pinned this object in GTT space. The following * users can each hold at most one reference: pwrite/pread, pin_ioctl * (via user_pin_count), execbuffer (objects are not allowed multiple * times for the same batchbuffer), and the framebuffer code. When * switching/pageflipping, the framebuffer code has at most two buffers * pinned per crtc. * * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 * bits with absolutely no headroom. So use 4 bits. */ unsigned int pin_count:4; #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf /** * Is the object at the current location in the gtt mappable and * fenceable? 
Used to avoid costly recalculations. */ unsigned int map_and_fenceable:1; /** * Whether the current gtt mapping needs to be mappable (and isn't just * mappable by accident). Track pin and fault separate for a more * accurate mappable working set. */ unsigned int fault_mappable:1; unsigned int pin_mappable:1; unsigned int pin_display:1; /* * Is the GPU currently using a fence to access this buffer, */ unsigned int pending_fenced_gpu_access:1; unsigned int fenced_gpu_access:1; unsigned int cache_level:2; unsigned int has_aliasing_ppgtt_mapping:1; unsigned int has_global_gtt_mapping:1; unsigned int has_dma_mapping:1; vm_page_t *pages; int pages_pin_count; /** * Used for performing relocations during execbuffer insertion. */ struct hlist_node exec_node; unsigned long exec_handle; struct drm_i915_gem_exec_object2 *exec_entry; /** * Current offset of the object in GTT space. * * This is the same as gtt_space->start */ uint32_t gtt_offset; struct intel_ring_buffer *ring; /** Breadcrumb of last rendering to the buffer. */ uint32_t last_read_seqno; uint32_t last_write_seqno; /** Breadcrumb of last fenced GPU access to the buffer. */ uint32_t last_fenced_seqno; /** Current tiling stride for the object, if it's tiled. */ uint32_t stride; /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; /** User space pin count and filp owning the pin */ uint32_t user_pin_count; struct drm_file *pin_filp; /** for phy allocated objects */ struct drm_i915_gem_phys_object *phys_obj; /** * Number of crtcs where this object is currently the fb, but * will be page flipped away on the next vblank. When it * reaches 0, dev_priv->pending_flip_queue will be woken up. */ atomic_t pending_flip; }; #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) /** * Request queue structure. * * The request queue allows us to note sequence numbers that have been emitted * and may be associated with active buffers to be retired. * * By keeping this list, we can avoid having to do questionable * sequence-number comparisons on buffer last_rendering_seqnos, and associate * an emission time with seqnos for tracking how far ahead of the GPU we are. */ struct drm_i915_gem_request { /** On Which ring this request was generated */ struct intel_ring_buffer *ring; /** GEM sequence number associated with this request. */ uint32_t seqno; /** Position in the ringbuffer of the end of the request */ u32 tail; /** Time at which this request was emitted, in jiffies. 
*/ unsigned long emitted_jiffies; /** global list entry for this request */ struct list_head list; struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_list; }; struct drm_i915_file_private { struct { struct mtx lock; struct list_head request_list; } mm; struct drm_gem_names context_idr; }; #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) #define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_845G(dev) ((dev)->pci_device == 0x2562) #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) #define IS_I865G(dev) ((dev)->pci_device == 0x2572) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) #define IS_I945G(dev) ((dev)->pci_device == 0x2772) #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ (dev)->pci_device == 0x0152 || \ (dev)->pci_device == 0x015a) #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ (dev)->pci_device == 0x0106 || \ (dev)->pci_device == 0x010A) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_ULT(dev) (IS_HASWELL(dev) && \ ((dev)->pci_device & 0xFF00) == 0x0A00) /* * The genX designation typically refers to the render engine, so render * capability related checks should use IS_GEN, while display and other checks * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular * chips, etc.). */ #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
*/ #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) /* dsparb controlled by hw only */ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) #define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) #define GT_FREQUENCY_MULTIPLIER 50 /** * RC6 is a special power stage which allows the GPU to enter an very * low-voltage mode when idle, using down to 0V while at this stage. This * stage is entered automatically when the GPU is idle when RC6 support is * enabled, and as soon as new workload arises GPU wakes up automatically as well. * * There are different RC6 modes available in Intel GPU, which differentiate * among each other with the latency required to enter and leave RC6 and * voltage consumed by the GPU in different states. * * The combination of the following flags define which states GPU is allowed * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and * RC6pp is deepest RC6. Their support by hardware varies according to the * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one * which brings the most power savings; deeper states save more power, but * require higher latency to switch to and wake up. 
*/ #define INTEL_RC6_ENABLE (1<<0) #define INTEL_RC6p_ENABLE (1<<1) #define INTEL_RC6pp_ENABLE (1<<2) extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc __always_unused; extern int i915_panel_ignore_lid __read_mostly; extern unsigned int i915_powersave __read_mostly; extern int i915_semaphores __read_mostly; extern unsigned int i915_lvds_downclock __read_mostly; extern int i915_lvds_channel_mode __read_mostly; extern int i915_panel_use_ssc __read_mostly; extern int i915_vbt_sdvo_panel_type __read_mostly; extern int i915_enable_rc6 __read_mostly; extern int i915_enable_fbc __read_mostly; extern int i915_enable_hangcheck __read_mostly; extern int i915_enable_ppgtt __read_mostly; extern unsigned int i915_preliminary_hw_support __read_mostly; extern struct drm_driver i915_driver_info; extern struct cdev_pager_ops i915_gem_pager_ops; extern int intel_iommu_gfx_mapped; const struct intel_device_info *i915_get_device_id(int device); /* i915_debug.c */ int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top); void i915_sysctl_cleanup(struct drm_device *dev); extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); extern int i915_master_create(struct drm_device *dev, struct drm_master *master); extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); /* i915_dma.c */ void i915_update_dri1_breadcrumb(struct drm_device *dev); extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_unload(struct drm_device *); extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); extern void i915_driver_lastclose(struct drm_device * dev); extern void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); extern void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv); extern int i915_driver_device_is_agp(struct drm_device * dev); #ifdef CONFIG_COMPAT extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif extern int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *box, int DR1, int DR4); extern int intel_gpu_reset(struct drm_device *dev); extern int i915_reset(struct drm_device *dev); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); extern int i915_batchbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_console_resume(void *context, int pending); /* i915_irq.c */ void i915_hangcheck_elapsed(void *data); void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); extern void intel_gt_init(struct drm_device *dev); extern void intel_gt_reset(struct drm_device *dev); void i915_error_state_free(struct drm_i915_error_state *error); void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void 
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void intel_enable_asle(struct drm_device *dev); //#ifdef CONFIG_DEBUG_FS extern void i915_destroy_error_state(struct drm_device *dev); //#else //#define i915_destroy_error_state(x) //#endif /* i915_gem.c */ int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, bool map_and_fenceable, bool nonblocking); void i915_gem_object_unpin(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); uint32_t i915_get_gem_seqno(struct drm_device *dev); static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) { /* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */ obj->pages_pin_count++; } static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) { KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count")); 
obj->pages_pin_count--; } int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int i915_gem_object_sync(struct drm_i915_gem_object *obj, struct intel_ring_buffer *to); void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring); int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle); /** * Returns true if seq1 is later than seq2. */ static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2) { return (int32_t)(seq1 - seq2) >= 0; } extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); static inline bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; dev_priv->fence_regs[obj->fence_reg].pin_count++; return true; } else return false; } static inline void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; dev_priv->fence_regs[obj->fence_reg].pin_count--; } } void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, bool interruptible); void i915_gem_reset(struct drm_device *dev); void i915_gem_clflush_object(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, uint32_t read_domains, uint32_t write_domain); int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); void i915_gem_l3_remap(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_init_ppgtt(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_idle(struct drm_device *dev); int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, u32 *seqno); int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno); int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot, uint64_t *phys); int __must_check i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined); void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, int id, int align); void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); void i915_gem_release(struct drm_device *dev, struct drm_file *file); uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, uint32_t 
size, int tiling_mode); int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); #ifdef FREEBSD_WIP struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags); #endif /* FREEBSD_WIP */ int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot); /* i915_gem_context.c */ void i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, int to_id); int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file); /* i915_gem_gtt.c */ int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj); void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); void i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start, unsigned long mappable_end, unsigned long end); int i915_gem_gtt_init(struct drm_device *dev); void i915_gem_gtt_fini(struct drm_device *dev); static inline void i915_gem_chipset_flush(struct drm_device *dev) { if (INTEL_INFO(dev)->gen < 6) intel_gtt_chipset_flush(); } /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, unsigned cache_level, bool mappable, bool nonblock); int i915_gem_evict_everything(struct drm_device *dev); /* i915_gem_stolen.c */ int i915_gem_init_stolen(struct drm_device *dev); void i915_gem_cleanup_stolen(struct drm_device *dev); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); void i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj, struct vm_page *m); /* i915_gem_debug.c */ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark); #if WATCH_LISTS int i915_verify_lists(struct drm_device *dev); #else #define i915_verify_lists(dev) 0 #endif void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle); -void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, - const char *where, uint32_t mark); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); /* intel_i2c.c */ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); static inline bool intel_gmbus_is_port_valid(unsigned port) { return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); } extern device_t intel_gmbus_get_adapter( 
struct drm_i915_private *dev_priv, unsigned port); extern void intel_gmbus_set_speed(device_t idev, int speed); extern void intel_gmbus_force_bit(device_t idev, bool force_bit); extern bool intel_gmbus_is_forced_bit(device_t adapter); extern void intel_i2c_reset(struct drm_device *dev); /* intel_opregion.c */ extern int intel_opregion_setup(struct drm_device *dev); #ifdef CONFIG_ACPI extern void intel_opregion_init(struct drm_device *dev); extern void intel_opregion_fini(struct drm_device *dev); extern void intel_opregion_asle_intr(struct drm_device *dev); extern void intel_opregion_gse_intr(struct drm_device *dev); extern void intel_opregion_enable_asle(struct drm_device *dev); #else static inline void intel_opregion_init(struct drm_device *dev) { return; } static inline void intel_opregion_fini(struct drm_device *dev) { return; } static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } #endif /* intel_acpi.c */ #ifdef CONFIG_ACPI extern void intel_register_dsm_handler(void); extern void intel_unregister_dsm_handler(void); #else static inline void intel_register_dsm_handler(void) { return; } static inline void intel_unregister_dsm_handler(void) { return; } #endif /* CONFIG_ACPI */ /* modesetting */ extern void intel_modeset_init_hw(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void intel_modeset_setup_hw_state(struct drm_device *dev, bool force_restore); -extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void intel_detect_pch(struct drm_device *dev); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); extern int intel_enable_rc6(const struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_device *dev); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); /* overlay */ //#ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct sbuf *m, struct intel_overlay_error_state *error); extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); extern void intel_display_print_error_state(struct sbuf *m, struct drm_device *dev, struct intel_display_error_state *error); //#endif static inline void trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz) { CTR4(KTR_DRM_REG, "[%x/%d] %c %x", reg, sz, rw ? "w" : "r", val); } /* On SNB platform, before reading ring registers forcewake bit * must be set to prevent GT core from power down and stale values being * returned. 
*/ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); __i915_read(8, 8) __i915_read(16, 16) __i915_read(32, 32) __i915_read(64, 64) #undef __i915_read #define __i915_write(x, y) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); __i915_write(8, 8) __i915_write(16, 16) __i915_write(32, 32) __i915_write(64, 64) #undef __i915_write #define I915_READ8(reg) i915_read8(dev_priv, (reg)) #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) #define I915_READ16(reg) i915_read16(dev_priv, (reg)) #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) #define I915_READ16_NOTRACE(reg) DRM_READ16(dev_priv->mmio_map, (reg)) #define I915_WRITE16_NOTRACE(reg, val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) #define I915_READ(reg) i915_read32(dev_priv, (reg)) #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) #define I915_READ_NOTRACE(reg) DRM_READ32(dev_priv->mmio_map, (reg)) #define I915_WRITE_NOTRACE(reg, val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) #define I915_READ64(reg) i915_read64(dev_priv, (reg)) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); #endif Index: stable/11/sys/dev/drm2/radeon/radeon_acpi.c =================================================================== --- stable/11/sys/dev/drm2/radeon/radeon_acpi.c (revision 303688) +++ stable/11/sys/dev/drm2/radeon/radeon_acpi.c (revision 303689) @@ -1,641 +1,639 @@ /* * Copyright 2012 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include "radeon.h" #include "radeon_acpi.h" #include "atom.h" #define ACPI_AC_CLASS "ac_adapter" -extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev); - struct atif_verify_interface { u16 size; /* structure size in bytes (includes size field) */ u16 version; /* version */ u32 notification_mask; /* supported notifications mask */ u32 function_bits; /* supported functions bit vector */ } __packed; struct atif_system_params { u16 size; /* structure size in bytes (includes size field) */ u32 valid_mask; /* valid flags mask */ u32 flags; /* flags */ u8 command_code; /* notify command code */ } __packed; struct atif_sbios_requests { u16 size; /* structure size in bytes (includes size field) */ u32 pending; /* pending sbios requests */ u8 panel_exp_mode; /* panel expansion mode */ u8 thermal_gfx; /* thermal state: target gfx controller */ u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */ u8 forced_power_gfx; /* forced power state: target gfx controller */ u8 forced_power_state; /* forced power state: state id */ u8 system_power_src; /* system power source */ u8 backlight_level; /* panel backlight level (0-255) */ } __packed; #define ATIF_NOTIFY_MASK 0x3 #define ATIF_NOTIFY_NONE 0 #define ATIF_NOTIFY_81 1 #define ATIF_NOTIFY_N 2 struct atcs_verify_interface { u16 size; /* structure size in bytes (includes size field) */ u16 version; /* version */ u32 function_bits; /* supported functions bit vector */ } __packed; /* Call the ATIF method */ /** * radeon_atif_call - call an ATIF method * * @handle: acpi handle * @function: the ATIF function to execute * @params: ATIF function params * * Executes the requested ATIF function (all asics). * Returns a pointer to the acpi output buffer. */ static ACPI_OBJECT *radeon_atif_call(ACPI_HANDLE handle, int function, ACPI_BUFFER *params) { ACPI_STATUS status; ACPI_OBJECT atif_arg_elements[2]; ACPI_OBJECT_LIST atif_arg; ACPI_BUFFER buffer = { ACPI_ALLOCATE_BUFFER, NULL }; atif_arg.Count = 2; atif_arg.Pointer = &atif_arg_elements[0]; atif_arg_elements[0].Type = ACPI_TYPE_INTEGER; atif_arg_elements[0].Integer.Value = function; if (params) { atif_arg_elements[1].Type = ACPI_TYPE_BUFFER; atif_arg_elements[1].Buffer.Length = params->Length; atif_arg_elements[1].Buffer.Pointer = params->Pointer; } else { /* We need a second fake parameter */ atif_arg_elements[1].Type = ACPI_TYPE_INTEGER; atif_arg_elements[1].Integer.Value = 0; } status = AcpiEvaluateObject(handle, "ATIF", &atif_arg, &buffer); /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", AcpiFormatException(status)); AcpiOsFree(buffer.Pointer); return NULL; } return buffer.Pointer; } /** * radeon_atif_parse_notification - parse supported notifications * * @n: supported notifications struct * @mask: supported notifications mask from ATIF * * Use the supported notifications mask from ATIF function * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications * are supported (all asics). 
*/ static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask) { n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED; n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED; n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED; n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED; n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; } /** * radeon_atif_parse_functions - parse supported functions * * @f: supported functions struct * @mask: supported functions mask from ATIF * * Use the supported functions mask from ATIF function * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions * are supported (all asics). */ static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask) { f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED; f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED; f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED; f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED; f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED; f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED; f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED; f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED; f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED; f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED; } /** * radeon_atif_verify_interface - verify ATIF * * @handle: acpi handle * @atif: radeon atif struct * * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function * to initialize ATIF and determine what features are supported * (all asics). * returns 0 on success, error on failure. */ static int radeon_atif_verify_interface(ACPI_HANDLE handle, struct radeon_atif *atif) { ACPI_OBJECT *info; struct atif_verify_interface output; size_t size; int err = 0; info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); if (!info) return -EIO; memset(&output, 0, sizeof(output)); size = *(u16 *) info->Buffer.Pointer; if (size < 12) { DRM_INFO("ATIF buffer is too small: %zu\n", size); err = -EINVAL; goto out; } size = min(sizeof(output), size); memcpy(&output, info->Buffer.Pointer, size); /* TODO: check version? */ DRM_DEBUG_DRIVER("ATIF version %u\n", output.version); radeon_atif_parse_notification(&atif->notifications, output.notification_mask); radeon_atif_parse_functions(&atif->functions, output.function_bits); out: AcpiOsFree(info); return err; } /** * radeon_atif_get_notification_params - determine notify configuration * * @handle: acpi handle * @n: atif notification configuration struct * * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function * to determine if a notifier is used and if so which one * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n) * where n is specified in the result if a notifier is used. * Returns 0 on success, error on failure. 
*/ static int radeon_atif_get_notification_params(ACPI_HANDLE handle, struct radeon_atif_notification_cfg *n) { ACPI_OBJECT *info; struct atif_system_params params; size_t size; int err = 0; info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); if (!info) { err = -EIO; goto out; } size = *(u16 *) info->Buffer.Pointer; if (size < 10) { err = -EINVAL; goto out; } memset(&params, 0, sizeof(params)); size = min(sizeof(params), size); memcpy(&params, info->Buffer.Pointer, size); DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n", params.flags, params.valid_mask); params.flags = params.flags & params.valid_mask; if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) { n->enabled = false; n->command_code = 0; } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) { n->enabled = true; n->command_code = 0x81; } else { if (size < 11) { err = -EINVAL; goto out; } n->enabled = true; n->command_code = params.command_code; } out: DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n", (n->enabled ? "enabled" : "disabled"), n->command_code); AcpiOsFree(info); return err; } /** * radeon_atif_get_sbios_requests - get requested sbios event * * @handle: acpi handle * @req: atif sbios request struct * * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function * to determine what requests the sbios is making to the driver * (all asics). * Returns 0 on success, error on failure. */ static int radeon_atif_get_sbios_requests(ACPI_HANDLE handle, struct atif_sbios_requests *req) { ACPI_OBJECT *info; size_t size; int count = 0; info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); if (!info) return -EIO; size = *(u16 *)info->Buffer.Pointer; if (size < 0xd) { count = -EINVAL; goto out; } memset(req, 0, sizeof(*req)); size = min(sizeof(*req), size); memcpy(req, info->Buffer.Pointer, size); DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending); count = hweight32(req->pending); out: AcpiOsFree(info); return count; } /** * radeon_atif_handler - handle ATIF notify requests * * @rdev: radeon_device pointer * @event: atif sbios request struct * * Checks the acpi event and if it matches an atif event, * handles it. * Returns NOTIFY code */ void radeon_atif_handler(struct radeon_device *rdev, UINT32 type) { struct radeon_atif *atif = &rdev->atif; struct atif_sbios_requests req; ACPI_HANDLE handle; int count; DRM_DEBUG_DRIVER("event, type = %#x\n", type); if (!atif->notification_cfg.enabled || type != atif->notification_cfg.command_code) /* Not our event */ return; /* Check pending SBIOS requests */ handle = rdev->acpi.handle; count = radeon_atif_get_sbios_requests(handle, &req); if (count <= 0) return; DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count); if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) { struct radeon_encoder *enc = atif->encoder_for_bl; if (enc) { DRM_DEBUG_DRIVER("Changing brightness to %d\n", req.backlight_level); radeon_set_backlight_level(rdev, enc, req.backlight_level); #ifdef FREEBSD_WIP if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *dig = enc->enc_priv; backlight_force_update(dig->bl_dev, BACKLIGHT_UPDATE_HOTKEY); } else { struct radeon_encoder_lvds *dig = enc->enc_priv; backlight_force_update(dig->bl_dev, BACKLIGHT_UPDATE_HOTKEY); } #endif /* FREEBSD_WIP */ } } /* TODO: check other events */ /* We've handled the event, stop the notifier chain.
The ACPI interface * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to * userspace if the event was generated only to signal a SBIOS * request. */ } /* Call the ATCS method */ /** * radeon_atcs_call - call an ATCS method * * @handle: acpi handle * @function: the ATCS function to execute * @params: ATCS function params * * Executes the requested ATCS function (all asics). * Returns a pointer to the acpi output buffer. */ static union acpi_object *radeon_atcs_call(ACPI_HANDLE handle, int function, ACPI_BUFFER *params) { ACPI_STATUS status; ACPI_OBJECT atcs_arg_elements[2]; ACPI_OBJECT_LIST atcs_arg; ACPI_BUFFER buffer = { ACPI_ALLOCATE_BUFFER, NULL }; atcs_arg.Count = 2; atcs_arg.Pointer = &atcs_arg_elements[0]; atcs_arg_elements[0].Type = ACPI_TYPE_INTEGER; atcs_arg_elements[0].Integer.Value = function; if (params) { atcs_arg_elements[1].Type = ACPI_TYPE_BUFFER; atcs_arg_elements[1].Buffer.Length = params->Length; atcs_arg_elements[1].Buffer.Pointer = params->Pointer; } else { /* We need a second fake parameter */ atcs_arg_elements[1].Type = ACPI_TYPE_INTEGER; atcs_arg_elements[1].Integer.Value = 0; } status = AcpiEvaluateObject(handle, "ATCS", &atcs_arg, &buffer); /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n", AcpiFormatException(status)); AcpiOsFree(buffer.Pointer); return NULL; } return buffer.Pointer; } /** * radeon_atcs_parse_functions - parse supported functions * * @f: supported functions struct * @mask: supported functions mask from ATCS * * Use the supported functions mask from ATCS function * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions * are supported (all asics). */ static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask) { f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED; f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED; f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED; f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED; } /** * radeon_atcs_verify_interface - verify ATCS * * @handle: acpi handle * @atcs: radeon atcs struct * * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function * to initialize ATCS and determine what features are supported * (all asics). * returns 0 on success, error on failure. */ static int radeon_atcs_verify_interface(ACPI_HANDLE handle, struct radeon_atcs *atcs) { ACPI_OBJECT *info; struct atcs_verify_interface output; size_t size; int err = 0; info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL); if (!info) return -EIO; memset(&output, 0, sizeof(output)); size = *(u16 *) info->Buffer.Pointer; if (size < 8) { DRM_INFO("ATCS buffer is too small: %zu\n", size); err = -EINVAL; goto out; } size = min(sizeof(output), size); memcpy(&output, info->Buffer.Pointer, size); /* TODO: check version? */ DRM_DEBUG_DRIVER("ATCS version %u\n", output.version); radeon_atcs_parse_functions(&atcs->functions, output.function_bits); out: AcpiOsFree(info); return err; } /** * radeon_acpi_event - handle notify events * * @nb: notifier block * @val: val * @data: acpi event * * Calls relevant radeon functions in response to various * acpi events. 
* Returns NOTIFY code */ static void radeon_acpi_event(ACPI_HANDLE handle, UINT32 type, void *context) { struct radeon_device *rdev = (struct radeon_device *)context; #ifdef FREEBSD_WIP if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { if (power_supply_is_system_supplied() > 0) DRM_DEBUG_DRIVER("pm: AC\n"); else DRM_DEBUG_DRIVER("pm: DC\n"); radeon_pm_acpi_event_handler(rdev); } #endif /* FREEBSD_WIP */ /* Check for pending SBIOS requests */ radeon_atif_handler(rdev, type); } /* Call all ACPI methods here */ /** * radeon_acpi_init - init driver acpi support * * @rdev: radeon_device pointer * * Verifies the AMD ACPI interfaces and registers with the acpi * notifier chain (all asics). * Returns 0 on success, error on failure. */ int radeon_acpi_init(struct radeon_device *rdev) { ACPI_HANDLE handle; struct radeon_atif *atif = &rdev->atif; struct radeon_atcs *atcs = &rdev->atcs; int ret; /* Get the device handle */ handle = acpi_get_handle(rdev->dev); /* No need to proceed if we're sure that ATIF is not supported */ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) return 0; /* Call the ATCS method */ ret = radeon_atcs_verify_interface(handle, atcs); if (ret) { DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); } /* Call the ATIF method */ ret = radeon_atif_verify_interface(handle, atif); if (ret) { DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); goto out; } if (atif->notifications.brightness_change) { struct drm_encoder *tmp; struct radeon_encoder *target = NULL; /* Find the encoder controlling the brightness */ list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list, head) { struct radeon_encoder *enc = to_radeon_encoder(tmp); if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) && enc->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *dig = enc->enc_priv; if (dig->bl_dev) { target = enc; break; } } else { struct radeon_encoder_lvds *dig = enc->enc_priv; if (dig->bl_dev) { target = enc; break; } } } } atif->encoder_for_bl = target; if (!target) { /* Brightness change notification is enabled, but we * didn't find a backlight controller, this should * never happen. */ DRM_ERROR("Cannot find a backlight controller\n"); } } if (atif->functions.sbios_requests && !atif->functions.system_params) { /* XXX check this workraround, if sbios request function is * present we have to see how it's configured in the system * params */ atif->functions.system_params = true; } if (atif->functions.system_params) { ret = radeon_atif_get_notification_params(handle, &atif->notification_cfg); if (ret) { DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", ret); /* Disable notification */ atif->notification_cfg.enabled = false; } } out: rdev->acpi.handle = handle; rdev->acpi.notifier_call = radeon_acpi_event; AcpiInstallNotifyHandler(handle, ACPI_DEVICE_NOTIFY, rdev->acpi.notifier_call, rdev); return ret; } /** * radeon_acpi_fini - tear down driver acpi support * * @rdev: radeon_device pointer * * Unregisters with the acpi notifier chain (all asics). */ void radeon_acpi_fini(struct radeon_device *rdev) { AcpiRemoveNotifyHandler(rdev->acpi.handle, ACPI_DEVICE_NOTIFY, rdev->acpi.notifier_call); } Index: stable/11 =================================================================== --- stable/11 (revision 303688) +++ stable/11 (revision 303689) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r302571-302572,302577,302841
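
The i915_seqno_passed() helper in the i915 header above relies on modular arithmetic rather than a plain unsigned comparison, so the test stays correct when the 32-bit sequence counter wraps. A minimal user-space sketch of the same comparison; the function and the values in main() are illustrative only, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone illustration of the wrap-safe comparison used by
 * i915_seqno_passed(). */
static int
seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* The unsigned subtraction wraps modulo 2^32; reinterpreting the
	 * result as signed keeps "seq1 is at or after seq2" correct as long
	 * as the two values are less than 2^31 apart. */
	return ((int32_t)(seq1 - seq2) >= 0);
}

int
main(void)
{
	printf("%d\n", seqno_passed(5, 0xfffffff0U));	/* 1: seq1 is 21 steps ahead, across the wrap */
	printf("%d\n", seqno_passed(0xfffffff0U, 5));	/* 0: seq1 is behind seq2 */
	return (0);
}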
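POSTING_READ() and POSTING_READ16() in the same header expand to untraced register reads whose results are discarded: reading a register back forces a posted MMIO write out to the device before the driver moves on to a timing-sensitive step. A small stand-alone sketch of the idiom, with an in-memory array standing in for the mapped register BAR and a made-up offset:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[64];		/* stand-in for a mapped MMIO BAR */

/* Write a register, then read it back so the write cannot linger in a
 * posting buffer.  The offset is invented for the example. */
static void
write_then_post(volatile uint32_t *regs, uint32_t off, uint32_t val)
{
	regs[off / 4] = val;		/* may be posted on real hardware */
	(void)regs[off / 4];		/* posting read flushes it to the device */
}

int
main(void)
{
	write_then_post(fake_regs, 0x10, 0xcafef00dU);
	printf("0x%08x\n", fake_regs[0x10 / 4]);
	return (0);
}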
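The radeon ATIF/ATCS helpers all follow one defensive pattern when parsing the ACPI output buffer: read the leading 16-bit size field, reject undersized buffers, clamp the copy length to the local structure, and only then memcpy into it. A user-space sketch of that pattern, using an illustrative structure layout rather than the real __packed ATIF structs, and with an extra bound on the caller-supplied buffer length for the stand-alone case:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for an ATIF-style output block. */
struct example_output {
	uint16_t size;		/* total size, including this field */
	uint16_t version;
	uint32_t function_bits;
};

static int
parse_output(const void *buf, size_t buflen, struct example_output *out)
{
	uint16_t size;

	if (buflen < sizeof(size))
		return (-1);
	memcpy(&size, buf, sizeof(size));	/* size field leads the buffer */
	if (size < 8)				/* too small for the fields we need */
		return (-1);
	if (size > buflen)			/* never read past the buffer */
		size = buflen;
	if (size > sizeof(*out))		/* never write past our struct */
		size = sizeof(*out);
	memset(out, 0, sizeof(*out));
	memcpy(out, buf, size);
	return (0);
}

int
main(void)
{
	/* 8-byte little-endian example: size 8, version 1, functions 0x0f */
	uint8_t raw[8] = { 8, 0, 1, 0, 0x0f, 0, 0, 0 };
	struct example_output out;

	if (parse_output(raw, sizeof(raw), &out) == 0)
		printf("version %u, functions %#x\n",
		    (unsigned)out.version, (unsigned)out.function_bits);
	return (0);
}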