Index: sys/arm/conf/GENERIC
===================================================================
--- sys/arm/conf/GENERIC
+++ sys/arm/conf/GENERIC
@@ -244,6 +244,9 @@
 device hdmi
 device vchiq
+# DRM Video Controllers
+device drm
+
 # Pinmux
 device fdt_pinctrl
Index: sys/arm64/conf/GENERIC
===================================================================
--- sys/arm64/conf/GENERIC
+++ sys/arm64/conf/GENERIC
@@ -304,6 +304,9 @@
 device vt_efifb
+# DRM Video Controllers
+device drm
+
 # EVDEV support
 device evdev # input event device support
 options EVDEV_SUPPORT # evdev support in legacy drivers
Index: sys/conf/Makefile.arm
===================================================================
--- sys/conf/Makefile.arm
+++ sys/conf/Makefile.arm
@@ -19,6 +19,16 @@
 # Which version of config(8) is required.
 %VERSREQ= 600013
+DRM_INCLUDES+= -I${S}/dev/drm/core/include
+DRM_INCLUDES+= -I${S}/dev/drm/core/include/uapi
+DRM_INCLUDES+= -I${S}/dev/drm/drmkpi/include/
+
+DRM_CFLAGS= ${CFLAGS} ${DRM_INCLUDES}
+DRM_CFLAGS+= -include ${S}/dev/drm/drmkpi/include/drm/drm_os_freebsd.h
+DRM_CFLAGS+= '-DKBUILD_MODNAME="DRMv5.4"'
+DRM_CFLAGS+= -Wno-cast-qual -Wno-pointer-arith -Wno-missing-prototypes
+DRM_C= ${CC} -c ${DRM_CFLAGS} ${WERROR} ${PROF} ${.IMPSRC}
+
 STD8X16FONT?= iso
 .if !defined(S)
Index: sys/conf/Makefile.arm64
===================================================================
--- sys/conf/Makefile.arm64
+++ sys/conf/Makefile.arm64
@@ -20,6 +20,18 @@
 # Which version of config(8) is required.
 %VERSREQ= 600012
+DRM_INCLUDES+= -I${S}/dev/drm/core/include
+DRM_INCLUDES+= -I${S}/dev/drm/core/include/uapi
+DRM_INCLUDES+= -I${S}/dev/drm/drmkpi/include/
+
+DRM_CFLAGS= ${CFLAGS} ${DRM_INCLUDES}
+DRM_CFLAGS+= -include ${S}/dev/drm/drmkpi/include/drm/drm_os_freebsd.h
+DRM_CFLAGS+= '-DKBUILD_MODNAME="DRMv5.4"'
+DRM_CFLAGS+= -Wno-cast-qual -Wno-pointer-arith -Wno-missing-prototypes
+# The compiler complains about unsigned long long versus uint64_t; disable -Wformat for now
+DRM_CFLAGS+= -Wno-format
+DRM_C= ${CC} -c ${DRM_CFLAGS} ${WERROR} ${PROF} ${.IMPSRC}
+
 .if !defined(S)
 S= ../../..
.endif Index: sys/conf/files.arm =================================================================== --- sys/conf/files.arm +++ sys/conf/files.arm @@ -174,3 +174,82 @@ arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" + +# DRM +dev/drm/core/drm_atomic.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_atomic_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_atomic_state_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_atomic_uapi.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_auth.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_blend.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_bridge.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_cache.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_color_mgmt.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_connector.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_crtc.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_crtc_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_damage_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_drv.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_dumb_buffers.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_edid.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_encoder.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_fb_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_file.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_flip_work.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_fourcc.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_framebuffer.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_gem.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_hashtab.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_ioc32.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_ioctl.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_kms_helper_common.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_memory.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_mm.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_mode_config.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_mode_object.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_modes.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_modeset_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_modeset_lock.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_panel.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_panel_orientation_quirks.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_pci.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_plane.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_plane_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_prime.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_print.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_probe_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_property.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_rect.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_self_refresh_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_scdc_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_syncobj.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_vblank.c optional drm compile-with 
"${DRM_C}" +dev/drm/core/drm_vma_manager.c optional drm compile-with "${DRM_C}" + +# FreeBSD drm files +dev/drm/freebsd/drm_gem_cma_helper.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_gem_framebuffer_helper.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_fb_cma_helper.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_fbdev.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_os_freebsd.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_sysctl.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_sysfs.c optional drm compile-with "${DRM_C}" +dev/drm/bridges/drm_bridge_if.m optional drm + +# DRMKPI files +dev/drm/drmkpi/drmkpi_compat.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_current.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_dma_buf.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_dma_fence.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_idr.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_lock.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_page.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_page1.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_rcu.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_reservation.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_schedule.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_sync_file.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_tasklet.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_work.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/hdmi.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_sort.c optional drm compile-with "${DRM_C}" Index: sys/conf/files.arm64 =================================================================== --- sys/conf/files.arm64 +++ sys/conf/files.arm64 @@ -328,3 +328,82 @@ arm64/rockchip/clk/rk3328_cru.c optional fdt soc_rockchip_rk3328 arm64/rockchip/clk/rk3399_cru.c optional fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk3399_pmucru.c optional fdt soc_rockchip_rk3399 + +# DRM +dev/drm/core/drm_atomic.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_atomic_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_atomic_state_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_atomic_uapi.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_auth.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_blend.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_bridge.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_cache.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_color_mgmt.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_connector.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_crtc.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_crtc_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_damage_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_drv.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_dumb_buffers.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_edid.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_encoder.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_fb_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_file.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_flip_work.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_fourcc.c optional drm compile-with "${DRM_C}" 
+dev/drm/core/drm_framebuffer.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_gem.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_hashtab.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_ioc32.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_ioctl.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_kms_helper_common.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_memory.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_mm.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_mode_config.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_mode_object.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_modes.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_modeset_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_modeset_lock.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_panel.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_panel_orientation_quirks.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_pci.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_plane.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_plane_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_prime.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_print.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_probe_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_property.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_rect.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_self_refresh_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_scdc_helper.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_syncobj.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_vblank.c optional drm compile-with "${DRM_C}" +dev/drm/core/drm_vma_manager.c optional drm compile-with "${DRM_C}" + +# FreeBSD drm files +dev/drm/freebsd/drm_gem_cma_helper.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_gem_framebuffer_helper.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_fb_cma_helper.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_fbdev.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_os_freebsd.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_sysctl.c optional drm compile-with "${DRM_C}" +dev/drm/freebsd/drm_sysfs.c optional drm compile-with "${DRM_C}" +dev/drm/bridges/drm_bridge_if.m optional drm + +# DRMKPI files +dev/drm/drmkpi/drmkpi_compat.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_current.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_dma_buf.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_dma_fence.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_idr.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_lock.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_page.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_page1.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_rcu.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_reservation.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_schedule.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_sync_file.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_tasklet.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_work.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/hdmi.c optional drm compile-with "${DRM_C}" +dev/drm/drmkpi/drmkpi_sort.c optional drm compile-with "${DRM_C}" 
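For reviewers following the build glue: a driver layered on this core would use the same hooks. A hypothetical files.arm64 entry (example_panel is a made-up device name, not part of this patch) would look like:

dev/drm/panels/example_panel.c optional drm example_panel compile-with "${DRM_C}"

config(8) then emits a rule that compiles the file as ${CC} -c ${DRM_CFLAGS} ${WERROR} ${PROF}, with ${.IMPSRC} expanding to the source path — that is, with the DRM include paths, the forced -include of drm_os_freebsd.h, and the relaxed warning set defined in Makefile.arm/Makefile.arm64 above.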
Index: sys/dev/drm/bridges/drm_bridge_if.m =================================================================== --- /dev/null +++ sys/dev/drm/bridges/drm_bridge_if.m @@ -0,0 +1,42 @@ +#- +# Copyright (c) 2019 Emmanuel Vadot +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# +# $FreeBSD$ +# + +INTERFACE drm_bridge; + +HEADER { + struct drm_encoder; + struct drm_device; +}; + +# +# Add the bridge to the drm pipeline +# +METHOD int add_bridge { + device_t dev; + struct drm_encoder *encoder; + struct drm_device *drm; +}; Index: sys/dev/drm/core/drm_atomic.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_atomic.c @@ -0,0 +1,1531 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+
+#include <linux/sync_file.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
+#include <drm/drm_writeback.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+void __drm_crtc_commit_free(struct kref *kref)
+{
+	struct drm_crtc_commit *commit =
+		container_of(kref, struct drm_crtc_commit, ref);
+
+	kfree(commit);
+}
+EXPORT_SYMBOL(__drm_crtc_commit_free);
+
+/**
+ * drm_atomic_state_default_release -
+ * release memory initialized by drm_atomic_state_init
+ * @state: atomic state
+ *
+ * Free all the memory allocated by drm_atomic_state_init.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
+ */
+void drm_atomic_state_default_release(struct drm_atomic_state *state)
+{
+	kfree(state->connectors);
+	kfree(state->crtcs);
+	kfree(state->planes);
+	kfree(state->private_objs);
+}
+EXPORT_SYMBOL(drm_atomic_state_default_release);
+
+/**
+ * drm_atomic_state_init - init new atomic state
+ * @dev: DRM device
+ * @state: atomic state
+ *
+ * Default implementation for filling in a new atomic state.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
+ */
+int
+drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
+{
+	kref_init(&state->ref);
+
+	/* TODO legacy paths should maybe do a better job about
+	 * setting this appropriately?
+	 */
+	state->allow_modeset = true;
+
+	state->crtcs = kcalloc(dev->mode_config.num_crtc,
+			       sizeof(*state->crtcs), GFP_KERNEL);
+	if (!state->crtcs)
+		goto fail;
+	state->planes = kcalloc(dev->mode_config.num_total_plane,
+				sizeof(*state->planes), GFP_KERNEL);
+	if (!state->planes)
+		goto fail;
+
+	state->dev = dev;
+
+	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
+
+	return 0;
+fail:
+	drm_atomic_state_default_release(state);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_atomic_state_init);
+
+/**
+ * drm_atomic_state_alloc - allocate atomic state
+ * @dev: DRM device
+ *
+ * This allocates an empty atomic state to track updates.
+ */
+struct drm_atomic_state *
+drm_atomic_state_alloc(struct drm_device *dev)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+
+	if (!config->funcs->atomic_state_alloc) {
+		struct drm_atomic_state *state;
+
+		state = kzalloc(sizeof(*state), GFP_KERNEL);
+		if (!state)
+			return NULL;
+		if (drm_atomic_state_init(dev, state) < 0) {
+			kfree(state);
+			return NULL;
+		}
+		return state;
+	}
+
+	return config->funcs->atomic_state_alloc(dev);
+}
+EXPORT_SYMBOL(drm_atomic_state_alloc);
+
+/**
+ * drm_atomic_state_default_clear - clear base atomic state
+ * @state: atomic state
+ *
+ * Default implementation for clearing atomic state.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
+ */ +void drm_atomic_state_default_clear(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct drm_mode_config *config = &dev->mode_config; + int i; + + DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); + + for (i = 0; i < state->num_connector; i++) { + struct drm_connector *connector = state->connectors[i].ptr; + + if (!connector) + continue; + + connector->funcs->atomic_destroy_state(connector, + state->connectors[i].state); + state->connectors[i].ptr = NULL; + state->connectors[i].state = NULL; + state->connectors[i].old_state = NULL; + state->connectors[i].new_state = NULL; + drm_connector_put(connector); + } + + for (i = 0; i < config->num_crtc; i++) { + struct drm_crtc *crtc = state->crtcs[i].ptr; + + if (!crtc) + continue; + + crtc->funcs->atomic_destroy_state(crtc, + state->crtcs[i].state); + + state->crtcs[i].ptr = NULL; + state->crtcs[i].state = NULL; + state->crtcs[i].old_state = NULL; + state->crtcs[i].new_state = NULL; + + if (state->crtcs[i].commit) { + drm_crtc_commit_put(state->crtcs[i].commit); + state->crtcs[i].commit = NULL; + } + } + + for (i = 0; i < config->num_total_plane; i++) { + struct drm_plane *plane = state->planes[i].ptr; + + if (!plane) + continue; + + plane->funcs->atomic_destroy_state(plane, + state->planes[i].state); + state->planes[i].ptr = NULL; + state->planes[i].state = NULL; + state->planes[i].old_state = NULL; + state->planes[i].new_state = NULL; + } + + for (i = 0; i < state->num_private_objs; i++) { + struct drm_private_obj *obj = state->private_objs[i].ptr; + + obj->funcs->atomic_destroy_state(obj, + state->private_objs[i].state); + state->private_objs[i].ptr = NULL; + state->private_objs[i].state = NULL; + state->private_objs[i].old_state = NULL; + state->private_objs[i].new_state = NULL; + } + state->num_private_objs = 0; + + if (state->fake_commit) { + drm_crtc_commit_put(state->fake_commit); + state->fake_commit = NULL; + } +} +EXPORT_SYMBOL(drm_atomic_state_default_clear); + +/** + * drm_atomic_state_clear - clear state object + * @state: atomic state + * + * When the w/w mutex algorithm detects a deadlock we need to back off and drop + * all locks. So someone else could sneak in and change the current modeset + * configuration. Which means that all the state assembled in @state is no + * longer an atomic update to the current state, but to some arbitrary earlier + * state. Which could break assumptions the driver's + * &drm_mode_config_funcs.atomic_check likely relies on. + * + * Hence we must clear all cached state and completely start over, using this + * function. + */ +void drm_atomic_state_clear(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (config->funcs->atomic_state_clear) + config->funcs->atomic_state_clear(state); + else + drm_atomic_state_default_clear(state); +} +EXPORT_SYMBOL(drm_atomic_state_clear); + +/** + * __drm_atomic_state_free - free all memory for an atomic state + * @ref: This atomic state to deallocate + * + * This frees all memory associated with an atomic state, including all the + * per-object state for planes, crtcs and connectors. 
+ */ +void __drm_atomic_state_free(struct kref *ref) +{ + struct drm_atomic_state *state = container_of(ref, typeof(*state), ref); + struct drm_mode_config *config = &state->dev->mode_config; + + drm_atomic_state_clear(state); + + DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state); + + if (config->funcs->atomic_state_free) { + config->funcs->atomic_state_free(state); + } else { + drm_atomic_state_default_release(state); + kfree(state); + } +} +EXPORT_SYMBOL(__drm_atomic_state_free); + +/** + * drm_atomic_get_crtc_state - get crtc state + * @state: global atomic state object + * @crtc: crtc to get state object for + * + * This function returns the crtc state for the given crtc, allocating it if + * needed. It will also grab the relevant crtc lock to make sure that the state + * is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. + */ +struct drm_crtc_state * +drm_atomic_get_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + int ret, index = drm_crtc_index(crtc); + struct drm_crtc_state *crtc_state; + + WARN_ON(!state->acquire_ctx); + + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + if (crtc_state) + return crtc_state; + + ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + crtc_state = crtc->funcs->atomic_duplicate_state(crtc); + if (!crtc_state) + return ERR_PTR(-ENOMEM); + + state->crtcs[index].state = crtc_state; + state->crtcs[index].old_state = crtc->state; + state->crtcs[index].new_state = crtc_state; + state->crtcs[index].ptr = crtc; + crtc_state->state = state; + + DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", + crtc->base.id, crtc->name, crtc_state, state); + + return crtc_state; +} +EXPORT_SYMBOL(drm_atomic_get_crtc_state); + +static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, + const struct drm_crtc_state *new_crtc_state) +{ + struct drm_crtc *crtc = new_crtc_state->crtc; + + /* NOTE: we explicitly don't enforce constraints such as primary + * layer covering entire screen, since that is something we want + * to allow (on hw that supports it). For hw that does not, it + * should be checked in driver's crtc->atomic_check() vfunc. + * + * TODO: Add generic modeset state checks once we support those. + */ + + if (new_crtc_state->active && !new_crtc_state->enable) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + + /* The state->enable vs. state->mode_blob checks can be WARN_ON, + * as this is a kernel-internal detail that userspace should never + * be able to trigger. */ + if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && + WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + + if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && + WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + + /* + * Reject event generation for when a CRTC is off and stays off. + * It wouldn't be hard to implement this, but userspace has a track + * record of happily burning through 100% cpu (or worse, crash) when the + * display pipe is suspended. 
To avoid all that fun just reject updates + * that ask for events since likely that indicates a bug in the + * compositor's drawing loop. This is consistent with the vblank IOCTL + * and legacy page_flip IOCTL which also reject service on a disabled + * pipe. + */ + if (new_crtc_state->event && + !new_crtc_state->active && !old_crtc_state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + + return 0; +} + +static void drm_atomic_crtc_print_state(struct drm_printer *p, + const struct drm_crtc_state *state) +{ + struct drm_crtc *crtc = state->crtc; + + drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name); + drm_printf(p, "\tenable=%d\n", state->enable); + drm_printf(p, "\tactive=%d\n", state->active); + drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active); + drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed); + drm_printf(p, "\tmode_changed=%d\n", state->mode_changed); + drm_printf(p, "\tactive_changed=%d\n", state->active_changed); + drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed); + drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); + drm_printf(p, "\tplane_mask=%x\n", state->plane_mask); + drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask); + drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask); + drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode)); + + if (crtc->funcs->atomic_print_state) + crtc->funcs->atomic_print_state(p, state); +} + +static int drm_atomic_connector_check(struct drm_connector *connector, + struct drm_connector_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_writeback_job *writeback_job = state->writeback_job; + const struct drm_display_info *info = &connector->display_info; + + state->max_bpc = info->bpc ? info->bpc : 8; + if (connector->max_bpc_property) + state->max_bpc = min(state->max_bpc, state->max_requested_bpc); + + if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job) + return 0; + + if (writeback_job->fb && !state->crtc) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n", + connector->base.id, connector->name); + return -EINVAL; + } + + if (state->crtc) + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + + if (writeback_job->fb && !crtc_state->active) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n", + connector->base.id, connector->name, + state->crtc->base.id); + return -EINVAL; + } + + if (!writeback_job->fb) { + if (writeback_job->out_fence) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", + connector->base.id, connector->name); + return -EINVAL; + } + + drm_writeback_cleanup_job(writeback_job); + state->writeback_job = NULL; + } + + return 0; +} + +/** + * drm_atomic_get_plane_state - get plane state + * @state: global atomic state object + * @plane: plane to get state object for + * + * This function returns the plane state for the given plane, allocating it if + * needed. It will also grab the relevant plane lock to make sure that the state + * is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. 
+ */ +struct drm_plane_state * +drm_atomic_get_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + int ret, index = drm_plane_index(plane); + struct drm_plane_state *plane_state; + + WARN_ON(!state->acquire_ctx); + + /* the legacy pointers should never be set */ + WARN_ON(plane->fb); + WARN_ON(plane->old_fb); + WARN_ON(plane->crtc); + + plane_state = drm_atomic_get_existing_plane_state(state, plane); + if (plane_state) + return plane_state; + + ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + plane_state = plane->funcs->atomic_duplicate_state(plane); + if (!plane_state) + return ERR_PTR(-ENOMEM); + + state->planes[index].state = plane_state; + state->planes[index].ptr = plane; + state->planes[index].old_state = plane->state; + state->planes[index].new_state = plane_state; + plane_state->state = state; + + DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", + plane->base.id, plane->name, plane_state, state); + + if (plane_state->crtc) { + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_crtc_state(state, + plane_state->crtc); + if (IS_ERR(crtc_state)) + return ERR_CAST(crtc_state); + } + + return plane_state; +} +EXPORT_SYMBOL(drm_atomic_get_plane_state); + +static bool +plane_switching_crtc(const struct drm_plane_state *old_plane_state, + const struct drm_plane_state *new_plane_state) +{ + if (!old_plane_state->crtc || !new_plane_state->crtc) + return false; + + if (old_plane_state->crtc == new_plane_state->crtc) + return false; + + /* This could be refined, but currently there's no helper or driver code + * to implement direct switching of active planes nor userspace to take + * advantage of more direct plane switching without the intermediate + * full OFF state. + */ + return true; +} + +/** + * drm_atomic_plane_check - check plane state + * @old_plane_state: old plane state to check + * @new_plane_state: new plane state to check + * + * Provides core sanity checks for plane state. + * + * RETURNS: + * Zero on success, error code on failure + */ +static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, + const struct drm_plane_state *new_plane_state) +{ + struct drm_plane *plane = new_plane_state->plane; + struct drm_crtc *crtc = new_plane_state->crtc; + const struct drm_framebuffer *fb = new_plane_state->fb; + unsigned int fb_width, fb_height; + struct drm_mode_rect *clips; + uint32_t num_clips; + int ret; + + /* either *both* CRTC and FB must be set, or neither */ + if (crtc && !fb) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n", + plane->base.id, plane->name); + return -EINVAL; + } else if (fb && !crtc) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n", + plane->base.id, plane->name); + return -EINVAL; + } + + /* if disabled, we don't care about the rest of the state: */ + if (!crtc) + return 0; + + /* Check whether this plane is usable on this CRTC */ + if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { + DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", + crtc->base.id, crtc->name, + plane->base.id, plane->name); + return -EINVAL; + } + + /* Check whether this plane supports the fb pixel format. 
*/ + ret = drm_plane_check_pixel_format(plane, fb->format->format, + fb->modifier); + if (ret) { + struct drm_format_name_buf format_name; + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n", + plane->base.id, plane->name, + drm_get_format_name(fb->format->format, + &format_name), + fb->modifier); + return ret; + } + + /* Give drivers some help against integer overflows */ + if (new_plane_state->crtc_w > INT_MAX || + new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w || + new_plane_state->crtc_h > INT_MAX || + new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", + plane->base.id, plane->name, + new_plane_state->crtc_w, new_plane_state->crtc_h, + new_plane_state->crtc_x, new_plane_state->crtc_y); + return -ERANGE; + } + + fb_width = fb->width << 16; + fb_height = fb->height << 16; + + /* Make sure source coordinates are inside the fb. */ + if (new_plane_state->src_w > fb_width || + new_plane_state->src_x > fb_width - new_plane_state->src_w || + new_plane_state->src_h > fb_height || + new_plane_state->src_y > fb_height - new_plane_state->src_h) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", + plane->base.id, plane->name, + new_plane_state->src_w >> 16, + ((new_plane_state->src_w & 0xffff) * 15625) >> 10, + new_plane_state->src_h >> 16, + ((new_plane_state->src_h & 0xffff) * 15625) >> 10, + new_plane_state->src_x >> 16, + ((new_plane_state->src_x & 0xffff) * 15625) >> 10, + new_plane_state->src_y >> 16, + ((new_plane_state->src_y & 0xffff) * 15625) >> 10, + fb->width, fb->height); + return -ENOSPC; + } + + clips = drm_plane_get_damage_clips(new_plane_state); + num_clips = drm_plane_get_damage_clips_count(new_plane_state); + + /* Make sure damage clips are valid and inside the fb. */ + while (num_clips > 0) { + if (clips->x1 >= clips->x2 || + clips->y1 >= clips->y2 || + clips->x1 < 0 || + clips->y1 < 0 || + clips->x2 > fb_width || + clips->y2 > fb_height) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", + plane->base.id, plane->name, clips->x1, + clips->y1, clips->x2, clips->y2); + return -EINVAL; + } + clips++; + num_clips--; + } + + if (plane_switching_crtc(old_plane_state, new_plane_state)) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", + plane->base.id, plane->name); + return -EINVAL; + } + + return 0; +} + +static void drm_atomic_plane_print_state(struct drm_printer *p, + const struct drm_plane_state *state) +{ + struct drm_plane *plane = state->plane; + struct drm_rect src = drm_plane_state_src(state); + struct drm_rect dest = drm_plane_state_dest(state); + + drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); + drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); + drm_printf(p, "\tfb=%u\n", state->fb ? 
state->fb->base.id : 0);
+	if (state->fb)
+		drm_framebuffer_print_info(p, 2, state->fb);
+	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
+	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
+	drm_printf(p, "\trotation=%x\n", state->rotation);
+	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
+	drm_printf(p, "\tcolor-encoding=%s\n",
+		   drm_get_color_encoding_name(state->color_encoding));
+	drm_printf(p, "\tcolor-range=%s\n",
+		   drm_get_color_range_name(state->color_range));
+
+	if (plane->funcs->atomic_print_state)
+		plane->funcs->atomic_print_state(p, state);
+}
+
+/**
+ * DOC: handling driver private state
+ *
+ * Very often the DRM objects exposed to userspace in the atomic modeset api
+ * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
+ * underlying hardware. Especially for any kind of shared resources (e.g. shared
+ * clocks, scaler units, bandwidth and fifo limits shared among a group of
+ * planes or CRTCs, and so on) it makes sense to model these as independent
+ * objects. Drivers then need to do similar state tracking and commit ordering for
+ * such private (since not exposed to userspace) objects as the atomic core and
+ * helpers already provide for connectors, planes and CRTCs.
+ *
+ * To make this easier on drivers the atomic core provides some support to track
+ * driver private state objects using struct &drm_private_obj, with the
+ * associated state struct &drm_private_state.
+ *
+ * Similar to userspace-exposed objects, private state structures can be
+ * acquired by calling drm_atomic_get_private_obj_state(). Since this function
+ * does not take care of locking, drivers should wrap it for each type of
+ * private state object they have with the required call to drm_modeset_lock()
+ * for the corresponding &drm_modeset_lock.
+ *
+ * All private state structures contained in a &drm_atomic_state update can be
+ * iterated using for_each_oldnew_private_obj_in_state(),
+ * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
+ * Drivers are recommended to wrap these for each type of driver private state
+ * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
+ * least if they want to iterate over all objects of a given type.
+ *
+ * An earlier way to handle driver private state was by subclassing struct
+ * &drm_atomic_state. But since that encourages non-standard ways to implement
+ * the check/commit split atomic requires (by using e.g. "check and rollback or
+ * commit instead" of "duplicate state, check, then either commit or release
+ * duplicated state") it is deprecated in favour of using &drm_private_state.
+ */
+
+/**
+ * drm_atomic_private_obj_init - initialize private object
+ * @dev: DRM device this object will be attached to
+ * @obj: private object
+ * @state: initial private object state
+ * @funcs: pointer to the struct of function pointers that identify the object
+ * type
+ *
+ * Initialize the private object, which can be embedded into any
+ * driver private object that needs its own atomic state.
+ */ +void +drm_atomic_private_obj_init(struct drm_device *dev, + struct drm_private_obj *obj, + struct drm_private_state *state, + const struct drm_private_state_funcs *funcs) +{ + memset(obj, 0, sizeof(*obj)); + + drm_modeset_lock_init(&obj->lock); + + obj->state = state; + obj->funcs = funcs; + list_add_tail(&obj->head, &dev->mode_config.privobj_list); +} +EXPORT_SYMBOL(drm_atomic_private_obj_init); + +/** + * drm_atomic_private_obj_fini - finalize private object + * @obj: private object + * + * Finalize the private object. + */ +void +drm_atomic_private_obj_fini(struct drm_private_obj *obj) +{ + list_del(&obj->head); + obj->funcs->atomic_destroy_state(obj, obj->state); + drm_modeset_lock_fini(&obj->lock); +} +EXPORT_SYMBOL(drm_atomic_private_obj_fini); + +/** + * drm_atomic_get_private_obj_state - get private object state + * @state: global atomic state + * @obj: private object to get the state for + * + * This function returns the private object state for the given private object, + * allocating the state if needed. It will also grab the relevant private + * object lock to make sure that the state is consistent. + * + * RETURNS: + * + * Either the allocated state or the error code encoded into a pointer. + */ +struct drm_private_state * +drm_atomic_get_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj) +{ + int index, num_objs, i, ret; + size_t size; + struct __drm_private_objs_state *arr; + struct drm_private_state *obj_state; + + for (i = 0; i < state->num_private_objs; i++) + if (obj == state->private_objs[i].ptr) + return state->private_objs[i].state; + + ret = drm_modeset_lock(&obj->lock, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + num_objs = state->num_private_objs + 1; + size = sizeof(*state->private_objs) * num_objs; + arr = krealloc(state->private_objs, size, GFP_KERNEL); + if (!arr) + return ERR_PTR(-ENOMEM); + + state->private_objs = arr; + index = state->num_private_objs; + memset(&state->private_objs[index], 0, sizeof(*state->private_objs)); + + obj_state = obj->funcs->atomic_duplicate_state(obj); + if (!obj_state) + return ERR_PTR(-ENOMEM); + + state->private_objs[index].state = obj_state; + state->private_objs[index].old_state = obj->state; + state->private_objs[index].new_state = obj_state; + state->private_objs[index].ptr = obj; + obj_state->state = state; + + state->num_private_objs = num_objs; + + DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n", + obj, obj_state, state); + + return obj_state; +} +EXPORT_SYMBOL(drm_atomic_get_private_obj_state); + +/** + * drm_atomic_get_old_private_obj_state + * @state: global atomic state object + * @obj: private_obj to grab + * + * This function returns the old private object state for the given private_obj, + * or NULL if the private_obj is not part of the global atomic state. + */ +struct drm_private_state * +drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj) +{ + int i; + + for (i = 0; i < state->num_private_objs; i++) + if (obj == state->private_objs[i].ptr) + return state->private_objs[i].old_state; + + return NULL; +} +EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state); + +/** + * drm_atomic_get_new_private_obj_state + * @state: global atomic state object + * @obj: private_obj to grab + * + * This function returns the new private object state for the given private_obj, + * or NULL if the private_obj is not part of the global atomic state. 
+ */ +struct drm_private_state * +drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj) +{ + int i; + + for (i = 0; i < state->num_private_objs; i++) + if (obj == state->private_objs[i].ptr) + return state->private_objs[i].new_state; + + return NULL; +} +EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state); + +/** + * drm_atomic_get_old_connector_for_encoder - Get old connector for an encoder + * @state: Atomic state + * @encoder: The encoder to fetch the connector state for + * + * This function finds and returns the connector that was connected to @encoder + * as specified by the @state. + * + * If there is no connector in @state which previously had @encoder connected to + * it, this function will return NULL. While this may seem like an invalid use + * case, it is sometimes useful to differentiate commits which had no prior + * connectors attached to @encoder vs ones that did (and to inspect their + * state). This is especially true in enable hooks because the pipeline has + * changed. + * + * Returns: The old connector connected to @encoder, or NULL if the encoder is + * not connected. + */ +struct drm_connector * +drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder) +{ + struct drm_connector_state *conn_state; + struct drm_connector *connector; + unsigned int i; + + for_each_old_connector_in_state(state, connector, conn_state, i) { + if (conn_state->best_encoder == encoder) + return connector; + } + + return NULL; +} +EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder); + +/** + * drm_atomic_get_new_connector_for_encoder - Get new connector for an encoder + * @state: Atomic state + * @encoder: The encoder to fetch the connector state for + * + * This function finds and returns the connector that will be connected to + * @encoder as specified by the @state. + * + * If there is no connector in @state which will have @encoder connected to it, + * this function will return NULL. While this may seem like an invalid use case, + * it is sometimes useful to differentiate commits which have no connectors + * attached to @encoder vs ones that do (and to inspect their state). This is + * especially true in disable hooks because the pipeline will change. + * + * Returns: The new connector connected to @encoder, or NULL if the encoder is + * not connected. + */ +struct drm_connector * +drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder) +{ + struct drm_connector_state *conn_state; + struct drm_connector *connector; + unsigned int i; + + for_each_new_connector_in_state(state, connector, conn_state, i) { + if (conn_state->best_encoder == encoder) + return connector; + } + + return NULL; +} +EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder); + +/** + * drm_atomic_get_connector_state - get connector state + * @state: global atomic state object + * @connector: connector to get state object for + * + * This function returns the connector state for the given connector, + * allocating it if needed. It will also grab the relevant connector lock to + * make sure that the state is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. 
+ */
+struct drm_connector_state *
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+			       struct drm_connector *connector)
+{
+	int ret, index;
+	struct drm_mode_config *config = &connector->dev->mode_config;
+	struct drm_connector_state *connector_state;
+
+	WARN_ON(!state->acquire_ctx);
+
+	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
+	index = drm_connector_index(connector);
+
+	if (index >= state->num_connector) {
+		struct __drm_connnectors_state *c;
+		int alloc = max(index + 1, config->num_connector);
+
+		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
+		if (!c)
+			return ERR_PTR(-ENOMEM);
+
+		state->connectors = c;
+		memset(&state->connectors[state->num_connector], 0,
+		       sizeof(*state->connectors) * (alloc - state->num_connector));
+
+		state->num_connector = alloc;
+	}
+
+	if (state->connectors[index].state)
+		return state->connectors[index].state;
+
+	connector_state = connector->funcs->atomic_duplicate_state(connector);
+	if (!connector_state)
+		return ERR_PTR(-ENOMEM);
+
+	drm_connector_get(connector);
+	state->connectors[index].state = connector_state;
+	state->connectors[index].old_state = connector->state;
+	state->connectors[index].new_state = connector_state;
+	state->connectors[index].ptr = connector;
+	connector_state->state = state;
+
+	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
+			 connector->base.id, connector->name,
+			 connector_state, state);
+
+	if (connector_state->crtc) {
+		struct drm_crtc_state *crtc_state;
+
+		crtc_state = drm_atomic_get_crtc_state(state,
+						       connector_state->crtc);
+		if (IS_ERR(crtc_state))
+			return ERR_CAST(crtc_state);
+	}
+
+	return connector_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_state);
+
+static void drm_atomic_connector_print_state(struct drm_printer *p,
+		const struct drm_connector_state *state)
+{
+	struct drm_connector *connector = state->connector;
+
+	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
+	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+	drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+		if (state->writeback_job && state->writeback_job->fb)
+			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
+
+	if (connector->funcs->atomic_print_state)
+		connector->funcs->atomic_print_state(p, state);
+}
+
+/**
+ * drm_atomic_add_affected_connectors - add connectors for crtc
+ * @state: atomic state
+ * @crtc: DRM crtc
+ *
+ * This function walks the current configuration and adds all connectors
+ * currently using @crtc to the atomic configuration @state. Note that this
+ * function must acquire the connection mutex. This can potentially cause
+ * unneeded serialization if the update is just for the planes on one crtc. Hence
+ * drivers and helpers should only call this when really needed (e.g. when a
+ * full modeset needs to happen due to some change).
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+				   struct drm_crtc *crtc)
+{
+	struct drm_mode_config *config = &state->dev->mode_config;
+	struct drm_connector *connector;
+	struct drm_connector_state *conn_state;
+	struct drm_connector_list_iter conn_iter;
+	struct drm_crtc_state *crtc_state;
+	int ret;
+
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
+	if (ret)
+		return ret;
+
+	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
+			 crtc->base.id, crtc->name, state);
+
+	/*
+	 * Changed connectors are already in @state, so only need to look
+	 * at the connector_mask in crtc_state.
+	 */
+	drm_connector_list_iter_begin(state->dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
+			continue;
+
+		conn_state = drm_atomic_get_connector_state(state, connector);
+		if (IS_ERR(conn_state)) {
+			drm_connector_list_iter_end(&conn_iter);
+			return PTR_ERR(conn_state);
+		}
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
+
+/**
+ * drm_atomic_add_affected_planes - add planes for crtc
+ * @state: atomic state
+ * @crtc: DRM crtc
+ *
+ * This function walks the current configuration and adds all planes
+ * currently used by @crtc to the atomic configuration @state. This is useful
+ * when an atomic commit also needs to check all currently enabled planes on
+ * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
+ * to avoid special code to force-enable all planes.
+ *
+ * Since acquiring a plane state will always also acquire the w/w mutex of the
+ * current CRTC for that plane (if there is any) adding all the plane states for
+ * a CRTC will not reduce parallelism of atomic updates.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_affected_planes(struct drm_atomic_state *state,
+			       struct drm_crtc *crtc)
+{
+	const struct drm_crtc_state *old_crtc_state =
+		drm_atomic_get_old_crtc_state(state, crtc);
+	struct drm_plane *plane;
+
+	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+
+	DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
+			 crtc->base.id, crtc->name, state);
+
+	drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
+		struct drm_plane_state *plane_state =
+			drm_atomic_get_plane_state(state, plane);
+
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_affected_planes);
+
+/**
+ * drm_atomic_check_only - check whether a given config would work
+ * @state: atomic configuration to check
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */ +int drm_atomic_check_only(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct drm_mode_config *config = &dev->mode_config; + struct drm_plane *plane; + struct drm_plane_state *old_plane_state; + struct drm_plane_state *new_plane_state; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct drm_crtc_state *new_crtc_state; + struct drm_connector *conn; + struct drm_connector_state *conn_state; + int i, ret = 0; + + DRM_DEBUG_ATOMIC("checking %p\n", state); + + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + ret = drm_atomic_plane_check(old_plane_state, new_plane_state); + if (ret) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", + plane->base.id, plane->name); + return ret; + } + } + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state); + if (ret) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", + crtc->base.id, crtc->name); + return ret; + } + } + + for_each_new_connector_in_state(state, conn, conn_state, i) { + ret = drm_atomic_connector_check(conn, conn_state); + if (ret) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n", + conn->base.id, conn->name); + return ret; + } + } + + if (config->funcs->atomic_check) { + ret = config->funcs->atomic_check(state->dev, state); + + if (ret) { + DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n", + state, ret); + return ret; + } + } + + if (!state->allow_modeset) { + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + } + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_check_only); + +/** + * drm_atomic_commit - commit configuration atomically + * @state: atomic configuration to check + * + * Note that this function can return -EDEADLK if the driver needed to acquire + * more locks but encountered a deadlock. The caller must then do the usual w/w + * backoff dance and restart. All other errors are fatal. + * + * This function will take its own reference on @state. + * Callers should always release their reference with drm_atomic_state_put(). + * + * Returns: + * 0 on success, negative error code on failure. + */ +int drm_atomic_commit(struct drm_atomic_state *state) +{ + struct drm_mode_config *config = &state->dev->mode_config; + int ret; + + ret = drm_atomic_check_only(state); + if (ret) + return ret; + + DRM_DEBUG_ATOMIC("committing %p\n", state); + + return config->funcs->atomic_commit(state->dev, state, false); +} +EXPORT_SYMBOL(drm_atomic_commit); + +/** + * drm_atomic_nonblocking_commit - atomic nonblocking commit + * @state: atomic configuration to check + * + * Note that this function can return -EDEADLK if the driver needed to acquire + * more locks but encountered a deadlock. The caller must then do the usual w/w + * backoff dance and restart. All other errors are fatal. + * + * This function will take its own reference on @state. + * Callers should always release their reference with drm_atomic_state_put(). + * + * Returns: + * 0 on success, negative error code on failure. 
+ */ +int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) +{ + struct drm_mode_config *config = &state->dev->mode_config; + int ret; + + ret = drm_atomic_check_only(state); + if (ret) + return ret; + + DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state); + + return config->funcs->atomic_commit(state->dev, state, true); +} +EXPORT_SYMBOL(drm_atomic_nonblocking_commit); + +/* just used from drm-client and atomic-helper: */ +int __drm_atomic_helper_disable_plane(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + int ret; + + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret != 0) + return ret; + + drm_atomic_set_fb_for_plane(plane_state, NULL); + plane_state->crtc_x = 0; + plane_state->crtc_y = 0; + plane_state->crtc_w = 0; + plane_state->crtc_h = 0; + plane_state->src_x = 0; + plane_state->src_y = 0; + plane_state->src_w = 0; + plane_state->src_h = 0; + + return 0; +} +EXPORT_SYMBOL(__drm_atomic_helper_disable_plane); + +static int update_output_state(struct drm_atomic_state *state, + struct drm_mode_set *set) +{ + struct drm_device *dev = set->crtc->dev; + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; + int ret, i; + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, + state->acquire_ctx); + if (ret) + return ret; + + /* First disable all connectors on the target crtc. */ + ret = drm_atomic_add_affected_connectors(state, set->crtc); + if (ret) + return ret; + + for_each_new_connector_in_state(state, connector, new_conn_state, i) { + if (new_conn_state->crtc == set->crtc) { + ret = drm_atomic_set_crtc_for_connector(new_conn_state, + NULL); + if (ret) + return ret; + + /* Make sure legacy setCrtc always re-trains */ + new_conn_state->link_status = DRM_LINK_STATUS_GOOD; + } + } + + /* Then set all connectors from set->connectors on the target crtc */ + for (i = 0; i < set->num_connectors; i++) { + new_conn_state = drm_atomic_get_connector_state(state, + set->connectors[i]); + if (IS_ERR(new_conn_state)) + return PTR_ERR(new_conn_state); + + ret = drm_atomic_set_crtc_for_connector(new_conn_state, + set->crtc); + if (ret) + return ret; + } + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + /* + * Don't update ->enable for the CRTC in the set_config request, + * since a mismatch would indicate a bug in the upper layers. + * The actual modeset code later on will catch any + * inconsistencies here. 
+ */ + if (crtc == set->crtc) + continue; + + if (!new_crtc_state->connector_mask) { + ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state, + NULL); + if (ret < 0) + return ret; + + new_crtc_state->active = false; + } + } + + return 0; +} + +/* just used from drm-client and atomic-helper: */ +int __drm_atomic_helper_set_config(struct drm_mode_set *set, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_plane_state *primary_state; + struct drm_crtc *crtc = set->crtc; + int hdisplay, vdisplay; + int ret; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + primary_state = drm_atomic_get_plane_state(state, crtc->primary); + if (IS_ERR(primary_state)) + return PTR_ERR(primary_state); + + if (!set->mode) { + WARN_ON(set->fb); + WARN_ON(set->num_connectors); + + ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL); + if (ret != 0) + return ret; + + crtc_state->active = false; + + ret = drm_atomic_set_crtc_for_plane(primary_state, NULL); + if (ret != 0) + return ret; + + drm_atomic_set_fb_for_plane(primary_state, NULL); + + goto commit; + } + + WARN_ON(!set->fb); + WARN_ON(!set->num_connectors); + + ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode); + if (ret != 0) + return ret; + + crtc_state->active = true; + + ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); + if (ret != 0) + return ret; + + drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay); + + drm_atomic_set_fb_for_plane(primary_state, set->fb); + primary_state->crtc_x = 0; + primary_state->crtc_y = 0; + primary_state->crtc_w = hdisplay; + primary_state->crtc_h = vdisplay; + primary_state->src_x = set->x << 16; + primary_state->src_y = set->y << 16; + if (drm_rotation_90_or_270(primary_state->rotation)) { + primary_state->src_w = vdisplay << 16; + primary_state->src_h = hdisplay << 16; + } else { + primary_state->src_w = hdisplay << 16; + primary_state->src_h = vdisplay << 16; + } + +commit: + ret = update_output_state(state, set); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(__drm_atomic_helper_set_config); + +void drm_atomic_print_state(const struct drm_atomic_state *state) +{ + struct drm_printer p = drm_info_printer(state->dev->dev); + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + int i; + + DRM_DEBUG_ATOMIC("checking %p\n", state); + + for_each_new_plane_in_state(state, plane, plane_state, i) + drm_atomic_plane_print_state(&p, plane_state); + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) + drm_atomic_crtc_print_state(&p, crtc_state); + + for_each_new_connector_in_state(state, connector, connector_state, i) + drm_atomic_connector_print_state(&p, connector_state); +} + +static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, + bool take_locks) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + if (!drm_drv_uses_atomic_modeset(dev)) + return; + + list_for_each_entry(plane, &config->plane_list, head) { + if (take_locks) + drm_modeset_lock(&plane->mutex, NULL); + drm_atomic_plane_print_state(p, plane->state); + if (take_locks) + drm_modeset_unlock(&plane->mutex); + } + + list_for_each_entry(crtc, &config->crtc_list, head) { + if (take_locks) + drm_modeset_lock(&crtc->mutex, 
NULL);
+		drm_atomic_crtc_print_state(p, crtc->state);
+		if (take_locks)
+			drm_modeset_unlock(&crtc->mutex);
+	}
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	if (take_locks)
+		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	drm_for_each_connector_iter(connector, &conn_iter)
+		drm_atomic_connector_print_state(p, connector->state);
+	if (take_locks)
+		drm_modeset_unlock(&dev->mode_config.connection_mutex);
+	drm_connector_list_iter_end(&conn_iter);
+}
+
+/**
+ * drm_state_dump - dump entire device atomic state
+ * @dev: the drm device
+ * @p: where to print the state to
+ *
+ * Just for debugging. Drivers might want an option to dump state
+ * to dmesg in case of error IRQs. (Hint, you probably want to
+ * ratelimit this!)
+ *
+ * The caller must hold all modeset locks via drm_modeset_lock_all(); or, if
+ * this is called from an error IRQ handler, it should not be enabled by
+ * default. (I.e. if you are debugging errors you might not care that this
+ * is racy, but calling this without all modeset locks held is not inherently
+ * safe.)
+ */
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
+{
+	__drm_state_dump(dev, p, false);
+}
+EXPORT_SYMBOL(drm_state_dump);
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_state_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	__drm_state_dump(dev, &p, true);
+
+	return 0;
+}
+
+/* any use in debugfs files to dump individual planes/crtc/etc? */
+static const struct drm_info_list drm_atomic_debugfs_list[] = {
+	{"state", drm_state_info, 0},
+};
+
+int drm_atomic_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(drm_atomic_debugfs_list,
+					ARRAY_SIZE(drm_atomic_debugfs_list),
+					minor->debugfs_root, minor);
+}
+#endif
Index: sys/dev/drm/core/drm_atomic_helper.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_atomic_helper.c
@@ -0,0 +1,3464 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark
+ * Daniel Vetter
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "drm_crtc_helper_internal.h"
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * This helper library provides implementations of check and commit functions on
+ * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
+ * also provides convenience implementations for the atomic state handling
+ * callbacks for drivers which don't need to subclass the drm core structures to
+ * add their own additional internal state.
+ *
+ * This library also provides default implementations for the check callback in
+ * drm_atomic_helper_check() and for the commit callback with
+ * drm_atomic_helper_commit(). But the individual stages and callbacks are
+ * exposed to allow drivers to mix and match and e.g. use the plane helpers only
+ * together with a driver private modeset implementation.
+ *
+ * This library also provides implementations for all the legacy driver
+ * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
+ * drm_atomic_helper_disable_plane(), drm_atomic_helper_page_flip() and the
+ * various functions to implement set_property callbacks. New drivers must not
+ * implement these functions themselves but must use the provided helpers.
+ *
+ * The atomic helper uses the same function table structures as all other
+ * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
+ * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
+ * also shares the &struct drm_plane_helper_funcs function table with the plane
+ * helpers.
+ */
+static void
+drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
+				struct drm_plane_state *old_plane_state,
+				struct drm_plane_state *plane_state,
+				struct drm_plane *plane)
+{
+	struct drm_crtc_state *crtc_state;
+
+	if (old_plane_state->crtc) {
+		crtc_state = drm_atomic_get_new_crtc_state(state,
+							   old_plane_state->crtc);
+
+		if (WARN_ON(!crtc_state))
+			return;
+
+		crtc_state->planes_changed = true;
+	}
+
+	if (plane_state->crtc) {
+		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+
+		if (WARN_ON(!crtc_state))
+			return;
+
+		crtc_state->planes_changed = true;
+	}
+}
+
+/*
+ * For connectors that support multiple encoders, either the
+ * .atomic_best_encoder() or .best_encoder() operation must be implemented.
+ */
+static struct drm_encoder *
+pick_single_encoder_for_connector(struct drm_connector *connector)
+{
+	WARN_ON(connector->encoder_ids[1]);
+	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
+
+static int handle_conflicting_encoders(struct drm_atomic_state *state,
+				       bool disable_conflicting_encoders)
+{
+	struct drm_connector_state *new_conn_state;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	struct drm_encoder *encoder;
+	unsigned encoder_mask = 0;
+	int i, ret = 0;
+
+	/*
+	 * First loop, find all newly assigned encoders from the connectors
+	 * part of the state. If the same encoder is assigned to multiple
+	 * connectors bail out.
+ */ + for_each_new_connector_in_state(state, connector, new_conn_state, i) { + const struct drm_connector_helper_funcs *funcs = connector->helper_private; + struct drm_encoder *new_encoder; + + if (!new_conn_state->crtc) + continue; + + if (funcs->atomic_best_encoder) + new_encoder = funcs->atomic_best_encoder(connector, new_conn_state); + else if (funcs->best_encoder) + new_encoder = funcs->best_encoder(connector); + else + new_encoder = pick_single_encoder_for_connector(connector); + + if (new_encoder) { + if (encoder_mask & drm_encoder_mask(new_encoder)) { + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n", + new_encoder->base.id, new_encoder->name, + connector->base.id, connector->name); + + return -EINVAL; + } + + encoder_mask |= drm_encoder_mask(new_encoder); + } + } + + if (!encoder_mask) + return 0; + + /* + * Second loop, iterate over all connectors not part of the state. + * + * If a conflicting encoder is found and disable_conflicting_encoders + * is not set, an error is returned. Userspace can provide a solution + * through the atomic ioctl. + * + * If the flag is set conflicting connectors are removed from the crtc + * and the crtc is disabled if no encoder is left. This preserves + * compatibility with the legacy set_config behavior. + */ + drm_connector_list_iter_begin(state->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct drm_crtc_state *crtc_state; + + if (drm_atomic_get_new_connector_state(state, connector)) + continue; + + encoder = connector->state->best_encoder; + if (!encoder || !(encoder_mask & drm_encoder_mask(encoder))) + continue; + + if (!disable_conflicting_encoders) { + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n", + encoder->base.id, encoder->name, + connector->state->crtc->base.id, + connector->state->crtc->name, + connector->base.id, connector->name); + ret = -EINVAL; + goto out; + } + + new_conn_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(new_conn_state)) { + ret = PTR_ERR(new_conn_state); + goto out; + } + + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n", + encoder->base.id, encoder->name, + new_conn_state->crtc->base.id, new_conn_state->crtc->name, + connector->base.id, connector->name); + + crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + + ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL); + if (ret) + goto out; + + if (!crtc_state->connector_mask) { + ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, + NULL); + if (ret < 0) + goto out; + + crtc_state->active = false; + } + } +out: + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +static void +set_best_encoder(struct drm_atomic_state *state, + struct drm_connector_state *conn_state, + struct drm_encoder *encoder) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + + if (conn_state->best_encoder) { + /* Unset the encoder_mask in the old crtc state. */ + crtc = conn_state->connector->state->crtc; + + /* A NULL crtc is an error here because we should have + * duplicated a NULL best_encoder when crtc was NULL. + * As an exception restoring duplicated atomic state + * during resume is allowed, so don't warn when + * best_encoder is equal to encoder we intend to set. 
+	 */
+	WARN_ON(!crtc && encoder != conn_state->best_encoder);
+	if (crtc) {
+		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+		crtc_state->encoder_mask &=
+			~drm_encoder_mask(conn_state->best_encoder);
+	}
+	}
+
+	if (encoder) {
+		crtc = conn_state->crtc;
+		WARN_ON(!crtc);
+		if (crtc) {
+			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+			crtc_state->encoder_mask |=
+				drm_encoder_mask(encoder);
+		}
+	}
+
+	conn_state->best_encoder = encoder;
+}
+
+static void
+steal_encoder(struct drm_atomic_state *state,
+	      struct drm_encoder *encoder)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_connector_state, *new_connector_state;
+	int i;
+
+	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
+		struct drm_crtc *encoder_crtc;
+
+		if (new_connector_state->best_encoder != encoder)
+			continue;
+
+		encoder_crtc = old_connector_state->crtc;
+
+		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
+				 encoder->base.id, encoder->name,
+				 encoder_crtc->base.id, encoder_crtc->name);
+
+		set_best_encoder(state, new_connector_state, NULL);
+
+		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
+		crtc_state->connectors_changed = true;
+
+		return;
+	}
+}
+
+static int
+update_connector_routing(struct drm_atomic_state *state,
+			 struct drm_connector *connector,
+			 struct drm_connector_state *old_connector_state,
+			 struct drm_connector_state *new_connector_state)
+{
+	const struct drm_connector_helper_funcs *funcs;
+	struct drm_encoder *new_encoder;
+	struct drm_crtc_state *crtc_state;
+
+	DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
+			 connector->base.id,
+			 connector->name);
+
+	if (old_connector_state->crtc != new_connector_state->crtc) {
+		if (old_connector_state->crtc) {
+			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
+			crtc_state->connectors_changed = true;
+		}
+
+		if (new_connector_state->crtc) {
+			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
+			crtc_state->connectors_changed = true;
+		}
+	}
+
+	if (!new_connector_state->crtc) {
+		DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
+				 connector->base.id,
+				 connector->name);
+
+		set_best_encoder(state, new_connector_state, NULL);
+
+		return 0;
+	}
+
+	crtc_state = drm_atomic_get_new_crtc_state(state,
+						   new_connector_state->crtc);
+	/*
+	 * For compatibility with legacy users, we want to make sure that
+	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
+	 * which would result in anything else must be considered invalid, to
+	 * avoid turning on new displays on dead connectors.
+	 *
+	 * Since the connector can be unregistered at any point during an
+	 * atomic check or commit, this is racy. But that's OK: all we care
+	 * about is ensuring that userspace can't do anything but shut off the
+	 * display on a connector that was destroyed after it's been notified,
+	 * not before.
+	 *
+	 * Additionally, we also want to ignore connector registration when
+	 * we're trying to restore an atomic state during system resume since
+	 * there's a chance the connector may have been destroyed during the
+	 * process, but it's better to ignore that than to cause
+	 * drm_atomic_helper_resume() to fail.
+ */ + if (!state->duplicated && drm_connector_is_unregistered(connector) && + crtc_state->active) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n", + connector->base.id, connector->name); + return -EINVAL; + } + + funcs = connector->helper_private; + + if (funcs->atomic_best_encoder) + new_encoder = funcs->atomic_best_encoder(connector, + new_connector_state); + else if (funcs->best_encoder) + new_encoder = funcs->best_encoder(connector); + else + new_encoder = pick_single_encoder_for_connector(connector); + + if (!new_encoder) { + DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + return -EINVAL; + } + + if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) { + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n", + new_encoder->base.id, + new_encoder->name, + new_connector_state->crtc->base.id, + new_connector_state->crtc->name); + return -EINVAL; + } + + if (new_encoder == new_connector_state->best_encoder) { + set_best_encoder(state, new_connector_state, new_encoder); + + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + new_connector_state->crtc->base.id, + new_connector_state->crtc->name); + + return 0; + } + + steal_encoder(state, new_encoder); + + set_best_encoder(state, new_connector_state, new_encoder); + + crtc_state->connectors_changed = true; + + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + new_connector_state->crtc->base.id, + new_connector_state->crtc->name); + + return 0; +} + +static int +mode_fixup(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; + int i; + int ret; + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + if (!new_crtc_state->mode_changed && + !new_crtc_state->connectors_changed) + continue; + + drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode); + } + + for_each_new_connector_in_state(state, connector, new_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + + WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc); + + if (!new_conn_state->crtc || !new_conn_state->best_encoder) + continue; + + new_crtc_state = + drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call ->mode_fixup twice. 
+	 */
+		encoder = new_conn_state->best_encoder;
+		funcs = encoder->helper_private;
+
+		ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
+					    &new_crtc_state->adjusted_mode);
+		if (!ret) {
+			DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
+			return -EINVAL;
+		}
+
+		if (funcs && funcs->atomic_check) {
+			ret = funcs->atomic_check(encoder, new_crtc_state,
+						  new_conn_state);
+			if (ret) {
+				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
+						 encoder->base.id, encoder->name);
+				return ret;
+			}
+		} else if (funcs && funcs->mode_fixup) {
+			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
+						&new_crtc_state->adjusted_mode);
+			if (!ret) {
+				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
+						 encoder->base.id, encoder->name);
+				return -EINVAL;
+			}
+		}
+	}
+
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		if (!new_crtc_state->enable)
+			continue;
+
+		if (!new_crtc_state->mode_changed &&
+		    !new_crtc_state->connectors_changed)
+			continue;
+
+		funcs = crtc->helper_private;
+		if (!funcs->mode_fixup)
+			continue;
+
+		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
+					&new_crtc_state->adjusted_mode);
+		if (!ret) {
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
+					 crtc->base.id, crtc->name);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
+					    struct drm_encoder *encoder,
+					    struct drm_crtc *crtc,
+					    const struct drm_display_mode *mode)
+{
+	enum drm_mode_status ret;
+
+	ret = drm_encoder_mode_valid(encoder, mode);
+	if (ret != MODE_OK) {
+		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
+				 encoder->base.id, encoder->name);
+		return ret;
+	}
+
+	ret = drm_bridge_mode_valid(encoder->bridge, mode);
+	if (ret != MODE_OK) {
+		DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
+		return ret;
+	}
+
+	ret = drm_crtc_mode_valid(crtc, mode);
+	if (ret != MODE_OK) {
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
+				 crtc->base.id, crtc->name);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int
+mode_valid(struct drm_atomic_state *state)
+{
+	struct drm_connector_state *conn_state;
+	struct drm_connector *connector;
+	int i;
+
+	for_each_new_connector_in_state(state, connector, conn_state, i) {
+		struct drm_encoder *encoder = conn_state->best_encoder;
+		struct drm_crtc *crtc = conn_state->crtc;
+		struct drm_crtc_state *crtc_state;
+		enum drm_mode_status mode_status;
+		const struct drm_display_mode *mode;
+
+		if (!crtc || !encoder)
+			continue;
+
+		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+		if (!crtc_state)
+			continue;
+		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+			continue;
+
+		mode = &crtc_state->mode;
+
+		mode_status = mode_valid_path(connector, encoder, crtc, mode);
+		if (mode_status != MODE_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * drm_atomic_helper_check_modeset - validate state object for modeset changes
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * This does all the crtc and connector related computations for an atomic
+ * update and adds any additional connectors needed for full modesets. It calls
+ * the various per-object callbacks in the following order:
+ *
+ * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
+ * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
+ * 3. If it's determined a modeset is needed then all connectors on the affected
+ *    crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
+ * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
+ *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
+ * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
+ * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
+ *    This function is only called when the encoder will be part of a configured crtc,
+ *    it must not be used for implementing connector property validation.
+ *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
+ *    instead.
+ * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
+ *
+ * &drm_crtc_state.mode_changed is set when the input mode is changed.
+ * &drm_crtc_state.connectors_changed is set when a connector is added or
+ * removed from the crtc. &drm_crtc_state.active_changed is set when
+ * &drm_crtc_state.active changes, which is used for DPMS.
+ * See also: drm_atomic_crtc_needs_modeset()
+ *
+ * IMPORTANT:
+ *
+ * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
+ * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
+ * without a full modeset) _must_ call this function after that change. It is
+ * permitted to call this function multiple times for the same update, e.g.
+ * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
+ * adjusted dotclock for fifo space allocation and watermark computation.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int
+drm_atomic_helper_check_modeset(struct drm_device *dev,
+				struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_connector_state, *new_connector_state;
+	int i, ret;
+	unsigned connectors_mask = 0;
+
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		bool has_connectors =
+			!!new_crtc_state->connector_mask;
+
+		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
+					 crtc->base.id, crtc->name);
+			new_crtc_state->mode_changed = true;
+		}
+
+		if (old_crtc_state->enable != new_crtc_state->enable) {
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
+					 crtc->base.id, crtc->name);
+
+			/*
+			 * For clarity this assignment is done here, but
+			 * enable == 0 is only true when there are no
+			 * connectors and a NULL mode.
+			 *
+			 * The other way around is true as well. enable != 0
+			 * iff connectors are attached and a mode is set.
+ */ + new_crtc_state->mode_changed = true; + new_crtc_state->connectors_changed = true; + } + + if (old_crtc_state->active != new_crtc_state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n", + crtc->base.id, crtc->name); + new_crtc_state->active_changed = true; + } + + if (new_crtc_state->enable != has_connectors) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n", + crtc->base.id, crtc->name); + + return -EINVAL; + } + } + + ret = handle_conflicting_encoders(state, false); + if (ret) + return ret; + + for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { + const struct drm_connector_helper_funcs *funcs = connector->helper_private; + + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + + /* + * This only sets crtc->connectors_changed for routing changes, + * drivers must set crtc->connectors_changed themselves when + * connector properties need to be updated. + */ + ret = update_connector_routing(state, connector, + old_connector_state, + new_connector_state); + if (ret) + return ret; + if (old_connector_state->crtc) { + new_crtc_state = drm_atomic_get_new_crtc_state(state, + old_connector_state->crtc); + if (old_connector_state->link_status != + new_connector_state->link_status) + new_crtc_state->connectors_changed = true; + + if (old_connector_state->max_requested_bpc != + new_connector_state->max_requested_bpc) + new_crtc_state->connectors_changed = true; + } + + if (funcs->atomic_check) + ret = funcs->atomic_check(connector, state); + if (ret) + return ret; + + connectors_mask |= BIT(i); + } + + /* + * After all the routing has been prepared we need to add in any + * connector which is itself unchanged, but whose crtc changes its + * configuration. This must be done before calling mode_fixup in case a + * crtc only changed its mode but has the same set of connectors. + */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + continue; + + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", + crtc->base.id, crtc->name, + new_crtc_state->enable ? 'y' : 'n', + new_crtc_state->active ? 'y' : 'n'); + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret != 0) + return ret; + + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret != 0) + return ret; + } + + /* + * Iterate over all connectors again, to make sure atomic_check() + * has been called on them when a modeset is forced. + */ + for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { + const struct drm_connector_helper_funcs *funcs = connector->helper_private; + + if (connectors_mask & BIT(i)) + continue; + + if (funcs->atomic_check) + ret = funcs->atomic_check(connector, state); + if (ret) + return ret; + } + + ret = mode_valid(state); + if (ret) + return ret; + + return mode_fixup(state); +} +EXPORT_SYMBOL(drm_atomic_helper_check_modeset); + +/** + * drm_atomic_helper_check_plane_state() - Check plane state for validity + * @plane_state: plane state to check + * @crtc_state: crtc state to check + * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point + * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point + * @can_position: is it legal to position the plane such that it + * doesn't cover the entire crtc? This will generally + * only be false for primary planes. 
+ * @can_update_disabled: can the plane be updated while the crtc
+ *                       is disabled?
+ *
+ * Checks that a desired plane update is valid, and updates various
+ * bits of derived state (clipped coordinates etc.). Drivers that provide
+ * their own plane handling rather than helper-provided implementations may
+ * still wish to call this function to avoid duplication of error checking
+ * code.
+ *
+ * RETURNS:
+ * Zero if update appears valid, error code on failure
+ */
+int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
+					const struct drm_crtc_state *crtc_state,
+					int min_scale,
+					int max_scale,
+					bool can_position,
+					bool can_update_disabled)
+{
+	struct drm_framebuffer *fb = plane_state->fb;
+	struct drm_rect *src = &plane_state->src;
+	struct drm_rect *dst = &plane_state->dst;
+	unsigned int rotation = plane_state->rotation;
+	struct drm_rect clip = {};
+	int hscale, vscale;
+
+	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
+
+	*src = drm_plane_state_src(plane_state);
+	*dst = drm_plane_state_dest(plane_state);
+
+	if (!fb) {
+		plane_state->visible = false;
+		return 0;
+	}
+
+	/* crtc should only be NULL when disabling (i.e., !fb) */
+	if (WARN_ON(!plane_state->crtc)) {
+		plane_state->visible = false;
+		return 0;
+	}
+
+	if (!crtc_state->enable && !can_update_disabled) {
+		DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
+		return -EINVAL;
+	}
+
+	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
+
+	/* Check scaling */
+	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
+	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
+	if (hscale < 0 || vscale < 0) {
+		DRM_DEBUG_KMS("Invalid scaling of plane\n");
+		drm_rect_debug_print("src: ", &plane_state->src, true);
+		drm_rect_debug_print("dst: ", &plane_state->dst, false);
+		return -ERANGE;
+	}
+
+	if (crtc_state->enable)
+		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
+
+	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
+
+	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
+
+	if (!plane_state->visible)
+		/*
+		 * Plane isn't visible; some drivers can handle this
+		 * so we just return success here. Drivers that can't
+		 * (including those that use the primary plane helper's
+		 * update function) will return an error from their
+		 * update_plane handler.
+		 */
+		return 0;
+
+	if (!can_position && !drm_rect_equals(dst, &clip)) {
+		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
+		drm_rect_debug_print("dst: ", dst, false);
+		drm_rect_debug_print("clip: ", &clip, false);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
+
+/**
+ * drm_atomic_helper_check_planes - validate state object for planes changes
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * This does all the plane update related checks by calling into the
+ * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
+ * hooks provided by the driver.
+ *
+ * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
+ * updated planes.
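+ *
+ * As a sketch only (hypothetical driver code, not part of this patch; the
+ * foo_ names are assumed), a driver with extra device-wide checks would
+ * typically call the two split helpers from its own
+ * &drm_mode_config_funcs.atomic_check implementation:
+ *
+ *	static int foo_atomic_check(struct drm_device *dev,
+ *				    struct drm_atomic_state *state)
+ *	{
+ *		int ret;
+ *
+ *		ret = drm_atomic_helper_check_modeset(dev, state);
+ *		if (ret)
+ *			return ret;
+ *		... driver-specific global checks on state ...
+ *		return drm_atomic_helper_check_planes(dev, state);
+ *	}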
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int
+drm_atomic_helper_check_planes(struct drm_device *dev,
+			       struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *new_plane_state, *old_plane_state;
+	int i, ret = 0;
+
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+		const struct drm_plane_helper_funcs *funcs;
+
+		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+
+		funcs = plane->helper_private;
+
+		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
+
+		drm_atomic_helper_check_plane_damage(state, new_plane_state);
+
+		if (!funcs || !funcs->atomic_check)
+			continue;
+
+		ret = funcs->atomic_check(plane, new_plane_state);
+		if (ret) {
+			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
+					 plane->base.id, plane->name);
+			return ret;
+		}
+	}
+
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		funcs = crtc->helper_private;
+
+		if (!funcs || !funcs->atomic_check)
+			continue;
+
+		ret = funcs->atomic_check(crtc, new_crtc_state);
+		if (ret) {
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
+					 crtc->base.id, crtc->name);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_planes);
+
+/**
+ * drm_atomic_helper_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * Only crtcs and planes have check callbacks, so for any additional (global)
+ * checking that a driver needs it can simply wrap that around this function.
+ * Drivers without such needs can directly use this as their
+ * &drm_mode_config_funcs.atomic_check callback.
+ *
+ * This just wraps the two parts of the state checking for planes and modeset
+ * state in the default order: First it calls drm_atomic_helper_check_modeset()
+ * and then drm_atomic_helper_check_planes(). The assumption is that the
+ * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
+ * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
+ * watermarks.
+ *
+ * Note that zpos normalization will add all enabled planes to the state, which
+ * might not be desired for some drivers. For example, enabling/disabling a
+ * cursor plane which has a fixed zpos value would force all other enabled
+ * planes to be part of the state change.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_atomic_helper_check(struct drm_device *dev,
+			    struct drm_atomic_state *state)
+{
+	int ret;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	if (dev->mode_config.normalize_zpos) {
+		ret = drm_atomic_normalize_zpos(dev, state);
+		if (ret)
+			return ret;
+	}
+
+	ret = drm_atomic_helper_check_planes(dev, state);
+	if (ret)
+		return ret;
+
+	if (state->legacy_cursor_update)
+		state->async_update = !drm_atomic_helper_async_check(dev, state);
+
+	drm_self_refresh_helper_alter_state(state);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check);
+
+static bool
+crtc_needs_disable(struct drm_crtc_state *old_state,
+		   struct drm_crtc_state *new_state)
+{
+	/*
+	 * No new_state means the crtc is off, so the only criterion is whether
+	 * it's currently active or in self refresh mode.
+ */ + if (!new_state) + return drm_atomic_crtc_effectively_active(old_state); + + /* + * We need to run through the crtc_funcs->disable() function if the crtc + * is currently on, if it's transitioning to self refresh mode, or if + * it's in self refresh mode and needs to be fully disabled. + */ + return old_state->active || + (old_state->self_refresh_active && !new_state->enable) || + new_state->self_refresh_active; +} + +static void +disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) +{ + struct drm_connector *connector; + struct drm_connector_state *old_conn_state, *new_conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + int i; + + for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + + /* Shut down everything that's in the changeset and currently + * still on. So need to check the old, saved state. */ + if (!old_conn_state->crtc) + continue; + + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc); + + if (new_conn_state->crtc) + new_crtc_state = drm_atomic_get_new_crtc_state( + old_state, + new_conn_state->crtc); + else + new_crtc_state = NULL; + + if (!crtc_needs_disable(old_crtc_state, new_crtc_state) || + !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state)) + continue; + + encoder = old_conn_state->best_encoder; + + /* We shouldn't get this far if we didn't previously have + * an encoder.. but WARN_ON() rather than explode. + */ + if (WARN_ON(!encoder)) + continue; + + funcs = encoder->helper_private; + + DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call disable hooks twice. + */ + drm_atomic_bridge_disable(encoder->bridge, old_state); + + /* Right function depends upon target state. */ + if (funcs) { + if (funcs->atomic_disable) + funcs->atomic_disable(encoder, old_state); + else if (new_conn_state->crtc && funcs->prepare) + funcs->prepare(encoder); + else if (funcs->disable) + funcs->disable(encoder); + else if (funcs->dpms) + funcs->dpms(encoder, DRM_MODE_DPMS_OFF); + } + + drm_atomic_bridge_post_disable(encoder->bridge, old_state); + } + + for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; + int ret; + + /* Shut down everything that needs a full modeset. */ + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + continue; + + if (!crtc_needs_disable(old_crtc_state, new_crtc_state)) + continue; + + funcs = crtc->helper_private; + + DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); + + + /* Right function depends upon target state. 
*/
+		if (new_crtc_state->enable && funcs->prepare)
+			funcs->prepare(crtc);
+		else if (funcs->atomic_disable)
+			funcs->atomic_disable(crtc, old_crtc_state);
+		else if (funcs->disable)
+			funcs->disable(crtc);
+		else if (funcs->dpms)
+			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+		if (!(dev->irq_enabled && dev->num_crtcs))
+			continue;
+
+		ret = drm_crtc_vblank_get(crtc);
+		WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
+		if (ret == 0)
+			drm_crtc_vblank_put(crtc);
+	}
+}
+
+/**
+ * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function updates all the various legacy modeset state pointers in
+ * connectors, encoders and crtcs. It also updates the timestamping constants
+ * used for precise vblank timestamps by calling
+ * drm_calc_timestamping_constants().
+ *
+ * Drivers can use this for building their own atomic commit if they don't have
+ * a pure helper-based modeset implementation.
+ *
+ * Since these updates are not synchronized with locking, only code paths
+ * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
+ * legacy state filled out by this helper. De facto this means this helper and
+ * the legacy state pointers are only really useful for transitioning an
+ * existing driver to the atomic world.
+ */
+void
+drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
+					      struct drm_atomic_state *old_state)
+{
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state, *new_conn_state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_crtc_state;
+	int i;
+
+	/* clear out existing links and update dpms */
+	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
+		if (connector->encoder) {
+			WARN_ON(!connector->encoder->crtc);
+
+			connector->encoder->crtc = NULL;
+			connector->encoder = NULL;
+		}
+
+		crtc = new_conn_state->crtc;
+		if ((!crtc && old_conn_state->crtc) ||
+		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
+			int mode = DRM_MODE_DPMS_OFF;
+
+			if (crtc && crtc->state->active)
+				mode = DRM_MODE_DPMS_ON;
+
+			connector->dpms = mode;
+		}
+	}
+
+	/* set new links */
+	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+		if (!new_conn_state->crtc)
+			continue;
+
+		if (WARN_ON(!new_conn_state->best_encoder))
+			continue;
+
+		connector->encoder = new_conn_state->best_encoder;
+		connector->encoder->crtc = new_conn_state->crtc;
+	}
+
+	/* set legacy state in the crtc structure */
+	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+		struct drm_plane *primary = crtc->primary;
+		struct drm_plane_state *new_plane_state;
+
+		crtc->mode = new_crtc_state->mode;
+		crtc->enabled = new_crtc_state->enable;
+
+		new_plane_state =
+			drm_atomic_get_new_plane_state(old_state, primary);
+
+		if (new_plane_state && new_plane_state->crtc == crtc) {
+			crtc->x = new_plane_state->src_x >> 16;
+			crtc->y = new_plane_state->src_y >> 16;
+		}
+
+		if (new_crtc_state->enable)
+			drm_calc_timestamping_constants(crtc,
+							&new_crtc_state->adjusted_mode);
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
+
+static void
+crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *new_conn_state;
+	int i;
+
+	for_each_new_crtc_in_state(old_state, crtc,
new_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; + + if (!new_crtc_state->mode_changed) + continue; + + funcs = crtc->helper_private; + + if (new_crtc_state->enable && funcs->mode_set_nofb) { + DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); + + funcs->mode_set_nofb(crtc); + } + } + + for_each_new_connector_in_state(old_state, connector, new_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + struct drm_display_mode *mode, *adjusted_mode; + + if (!new_conn_state->best_encoder) + continue; + + encoder = new_conn_state->best_encoder; + funcs = encoder->helper_private; + new_crtc_state = new_conn_state->crtc->state; + mode = &new_crtc_state->mode; + adjusted_mode = &new_crtc_state->adjusted_mode; + + if (!new_crtc_state->mode_changed) + continue; + + DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call mode_set hooks twice. + */ + if (funcs && funcs->atomic_mode_set) { + funcs->atomic_mode_set(encoder, new_crtc_state, + new_conn_state); + } else if (funcs && funcs->mode_set) { + funcs->mode_set(encoder, mode, adjusted_mode); + } + + drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode); + } +} + +/** + * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs + * @dev: DRM device + * @old_state: atomic state object with old state structures + * + * This function shuts down all the outputs that need to be shut down and + * prepares them (if required) with the new mode. + * + * For compatibility with legacy crtc helpers this should be called before + * drm_atomic_helper_commit_planes(), which is what the default commit function + * does. But drivers with different needs can group the modeset commits together + * and do the plane commits at the end. This is useful for drivers doing runtime + * PM since planes updates then only happen when the CRTC is actually enabled. + */ +void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + disable_outputs(dev, old_state); + + drm_atomic_helper_update_legacy_modeset_state(dev, old_state); + + crtc_set_mode(dev, old_state); +} +EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables); + +static void drm_atomic_helper_commit_writebacks(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; + int i; + + for_each_new_connector_in_state(old_state, connector, new_conn_state, i) { + const struct drm_connector_helper_funcs *funcs; + + funcs = connector->helper_private; + if (!funcs->atomic_commit) + continue; + + if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) { + WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); + funcs->atomic_commit(connector, new_conn_state); + } + } +} + +/** + * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs + * @dev: DRM device + * @old_state: atomic state object with old state structures + * + * This function enables all the outputs with the new configuration which had to + * be turned off for the update. + * + * For compatibility with legacy crtc helpers this should be called after + * drm_atomic_helper_commit_planes(), which is what the default commit function + * does. 
But drivers with different needs can group the modeset commits together + * and do the plane commits at the end. This is useful for drivers doing runtime + * PM since planes updates then only happen when the CRTC is actually enabled. + */ +void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct drm_crtc_state *new_crtc_state; + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; + int i; + + for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; + + /* Need to filter out CRTCs where only planes change. */ + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + continue; + + if (!new_crtc_state->active) + continue; + + funcs = crtc->helper_private; + + if (new_crtc_state->enable) { + DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); + if (funcs->atomic_enable) + funcs->atomic_enable(crtc, old_crtc_state); + else if (funcs->commit) + funcs->commit(crtc); + } + } + + for_each_new_connector_in_state(old_state, connector, new_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + + if (!new_conn_state->best_encoder) + continue; + + if (!new_conn_state->crtc->state->active || + !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state)) + continue; + + encoder = new_conn_state->best_encoder; + funcs = encoder->helper_private; + + DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call enable hooks twice. + */ + drm_atomic_bridge_pre_enable(encoder->bridge, old_state); + + if (funcs) { + if (funcs->atomic_enable) + funcs->atomic_enable(encoder, old_state); + else if (funcs->enable) + funcs->enable(encoder); + else if (funcs->commit) + funcs->commit(encoder); + } + + drm_atomic_bridge_enable(encoder->bridge, old_state); + } + + drm_atomic_helper_commit_writebacks(dev, old_state); +} +EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); + +/** + * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state + * @dev: DRM device + * @state: atomic state object with old state structures + * @pre_swap: If true, do an interruptible wait, and @state is the new state. + * Otherwise @state is the old state. + * + * For implicit sync, driver should fish the exclusive fence out from the + * incoming fb's and stash it in the drm_plane_state. This is called after + * drm_atomic_helper_swap_state() so it uses the current plane state (and + * just uses the atomic state to find the changed planes) + * + * Note that @pre_swap is needed since the point where we block for fences moves + * around depending upon whether an atomic commit is blocking or + * non-blocking. For non-blocking commit all waiting needs to happen after + * drm_atomic_helper_swap_state() is called, but for blocking commits we want + * to wait **before** we do anything that can't be easily rolled back. That is + * before we call drm_atomic_helper_swap_state(). + * + * Returns zero if success or < 0 if dma_fence_wait() fails. 
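+ *
+ * A rough sketch of the two call sites implied above (hypothetical driver
+ * commit code, not part of this patch; nonblock, state and old_state are
+ * assumed locals), mirroring what drm_atomic_helper_commit() does:
+ *
+ *	if (!nonblock) {
+ *		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ *		if (ret)
+ *			return ret;
+ *	}
+ *	... swap the state, then later, in the commit worker ...
+ *	drm_atomic_helper_wait_for_fences(dev, old_state, false);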
+ */
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+				      struct drm_atomic_state *state,
+				      bool pre_swap)
+{
+	struct drm_plane *plane;
+	struct drm_plane_state *new_plane_state;
+	int i, ret;
+
+	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+		if (!new_plane_state->fence)
+			continue;
+
+		WARN_ON(!new_plane_state->fb);
+
+		/*
+		 * If waiting for fences pre-swap (i.e. nonblock), userspace can
+		 * still interrupt the operation. Instead of blocking until the
+		 * timer expires, make the wait interruptible.
+		 */
+		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
+		if (ret)
+			return ret;
+
+		dma_fence_put(new_plane_state->fence);
+		new_plane_state->fence = NULL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
+
+/**
+ * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * Helper to, after atomic commit, wait for vblanks on all affected
+ * crtcs (i.e. before cleaning up old framebuffers using
+ * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
+ * framebuffers have actually changed to optimize for the legacy cursor and
+ * plane update use-case.
+ *
+ * Drivers using the nonblocking commit tracking support initialized by calling
+ * drm_atomic_helper_setup_commit() should look at
+ * drm_atomic_helper_wait_for_flip_done() as an alternative.
+ */
+void
+drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+				   struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	int i, ret;
+	unsigned crtc_mask = 0;
+
+	/*
+	 * Legacy cursor ioctls are completely unsynced, and userspace
+	 * relies on that (by doing tons of cursor updates).
+	 */
+	if (old_state->legacy_cursor_update)
+		return;
+
+	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+		if (!new_crtc_state->active)
+			continue;
+
+		ret = drm_crtc_vblank_get(crtc);
+		if (ret != 0)
+			continue;
+
+		crtc_mask |= drm_crtc_mask(crtc);
+		old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
+	}
+
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		if (!(crtc_mask & drm_crtc_mask(crtc)))
+			continue;
+
+		ret = wait_event_timeout(dev->vblank[i].queue,
+				old_state->crtcs[i].last_vblank_count !=
+					drm_crtc_vblank_count(crtc),
+				msecs_to_jiffies(100));
+
+		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
+		     crtc->base.id, crtc->name);
+
+		drm_crtc_vblank_put(crtc);
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
+
+/**
+ * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * Helper to, after atomic commit, wait for page flips on all affected
+ * crtcs (i.e. before cleaning up old framebuffers using
+ * drm_atomic_helper_cleanup_planes()). Compared to
+ * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
+ * CRTCs, assuming that cursors-only updates are signalling their completion
+ * immediately (or using a different path).
+ *
+ * This requires that drivers use the nonblocking commit tracking support
+ * initialized using drm_atomic_helper_setup_commit().
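+ *
+ * A minimal sketch (hypothetical commit-tail code, not part of this patch)
+ * of using it in place of drm_atomic_helper_wait_for_vblanks():
+ *
+ *	drm_atomic_helper_commit_hw_done(old_state);
+ *	drm_atomic_helper_wait_for_flip_done(dev, old_state);
+ *	drm_atomic_helper_cleanup_planes(dev, old_state);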
+ */ +void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + struct drm_crtc *crtc; + int i; + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct drm_crtc_commit *commit = old_state->crtcs[i].commit; + int ret; + + crtc = old_state->crtcs[i].ptr; + + if (!crtc || !commit) + continue; + + ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + } + + if (old_state->fake_commit) + complete_all(&old_state->fake_commit->flip_done); +} +EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done); + +/** + * drm_atomic_helper_commit_tail - commit atomic update to hardware + * @old_state: atomic state object with old state structures + * + * This is the default implementation for the + * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers + * that do not support runtime_pm or do not need the CRTC to be + * enabled to perform a commit. Otherwise, see + * drm_atomic_helper_commit_tail_rpm(). + * + * Note that the default ordering of how the various stages are called is to + * match the legacy modeset helper library closest. + */ +void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state) +{ + struct drm_device *dev = old_state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, old_state); + + drm_atomic_helper_commit_planes(dev, old_state, 0); + + drm_atomic_helper_commit_modeset_enables(dev, old_state); + + drm_atomic_helper_fake_vblank(old_state); + + drm_atomic_helper_commit_hw_done(old_state); + + drm_atomic_helper_wait_for_vblanks(dev, old_state); + + drm_atomic_helper_cleanup_planes(dev, old_state); +} +EXPORT_SYMBOL(drm_atomic_helper_commit_tail); + +/** + * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware + * @old_state: new modeset state to be committed + * + * This is an alternative implementation for the + * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers + * that support runtime_pm or need the CRTC to be enabled to perform a + * commit. Otherwise, one should use the default implementation + * drm_atomic_helper_commit_tail(). 
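+ *
+ * Wiring it up is a single assignment; sketch of hypothetical driver code
+ * (foo_mode_config_helpers is an assumed name, not part of this patch):
+ *
+ *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
+ *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+ *	};
+ *
+ *	dev->mode_config.helper_private = &foo_mode_config_helpers;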
+ */
+void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+
+	drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+	drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+	drm_atomic_helper_commit_planes(dev, old_state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+	drm_atomic_helper_fake_vblank(old_state);
+
+	drm_atomic_helper_commit_hw_done(old_state);
+
+	drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+	drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
+
+static void commit_tail(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+	const struct drm_mode_config_helper_funcs *funcs;
+
+	funcs = dev->mode_config.helper_private;
+
+	drm_atomic_helper_wait_for_fences(dev, old_state, false);
+
+	drm_atomic_helper_wait_for_dependencies(old_state);
+
+	if (funcs && funcs->atomic_commit_tail)
+		funcs->atomic_commit_tail(old_state);
+	else
+		drm_atomic_helper_commit_tail(old_state);
+
+	drm_atomic_helper_commit_cleanup_done(old_state);
+
+	drm_atomic_state_put(old_state);
+}
+
+static void commit_work(struct work_struct *work)
+{
+	struct drm_atomic_state *state = container_of(work,
+						      struct drm_atomic_state,
+						      commit_work);
+	commit_tail(state);
+}
+
+/**
+ * drm_atomic_helper_async_check - check if state can be committed asynchronously
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This helper will check if it is possible to commit the state asynchronously.
+ * Async commits are not supposed to swap the states like normal sync commits
+ * but just do in-place changes on the current state.
+ *
+ * It will return 0 if the commit can happen in an asynchronous fashion or error
+ * if not. Note that an error just means it can't be committed asynchronously;
+ * if it fails, the commit should be treated like a normal synchronous commit.
+ */
+int drm_atomic_helper_async_check(struct drm_device *dev,
+				  struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane *plane = NULL;
+	struct drm_plane_state *old_plane_state = NULL;
+	struct drm_plane_state *new_plane_state = NULL;
+	const struct drm_plane_helper_funcs *funcs;
+	int i, n_planes = 0;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		if (drm_atomic_crtc_needs_modeset(crtc_state))
+			return -EINVAL;
+	}
+
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
+		n_planes++;
+
+	/* FIXME: we support only single plane updates for now */
+	if (n_planes != 1)
+		return -EINVAL;
+
+	if (!new_plane_state->crtc ||
+	    old_plane_state->crtc != new_plane_state->crtc)
+		return -EINVAL;
+
+	funcs = plane->helper_private;
+	if (!funcs->atomic_async_update)
+		return -EINVAL;
+
+	if (new_plane_state->fence)
+		return -EINVAL;
+
+	/*
+	 * Don't do an async update if there is an outstanding commit modifying
+	 * the plane. This prevents our async update's changes from getting
+	 * overridden by a previous synchronous update's state.
+	 */
+	if (old_plane_state->commit &&
+	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
+		return -EBUSY;
+
+	return funcs->atomic_async_check(plane, new_plane_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_async_check);
+
+/**
+ * drm_atomic_helper_async_commit - commit state asynchronously
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This function commits a state asynchronously, i.e., not vblank
+ * synchronized.
It should be used on a state only when + * drm_atomic_async_check() succeeds. Async commits are not supposed to swap + * the states like normal sync commits, but just do in-place changes on the + * current state. + * + * TODO: Implement full swap instead of doing in-place changes. + */ +void drm_atomic_helper_async_commit(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct drm_plane_state *plane_state; + const struct drm_plane_helper_funcs *funcs; + int i; + + for_each_new_plane_in_state(state, plane, plane_state, i) { + struct drm_framebuffer *new_fb = plane_state->fb; + struct drm_framebuffer *old_fb = plane->state->fb; + + funcs = plane->helper_private; + funcs->atomic_async_update(plane, plane_state); + + /* + * ->atomic_async_update() is supposed to update the + * plane->state in-place, make sure at least common + * properties have been properly updated. + */ + WARN_ON_ONCE(plane->state->fb != new_fb); + WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x); + WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y); + WARN_ON_ONCE(plane->state->src_x != plane_state->src_x); + WARN_ON_ONCE(plane->state->src_y != plane_state->src_y); + + /* + * Make sure the FBs have been swapped so that cleanups in the + * new_state performs a cleanup in the old FB. + */ + WARN_ON_ONCE(plane_state->fb != old_fb); + } +} +EXPORT_SYMBOL(drm_atomic_helper_async_commit); + +/** + * drm_atomic_helper_commit - commit validated state object + * @dev: DRM device + * @state: the driver state object + * @nonblock: whether nonblocking behavior is requested. + * + * This function commits a with drm_atomic_helper_check() pre-validated state + * object. This can still fail when e.g. the framebuffer reservation fails. This + * function implements nonblocking commits, using + * drm_atomic_helper_setup_commit() and related functions. + * + * Committing the actual hardware state is done through the + * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default + * implementation drm_atomic_helper_commit_tail(). + * + * RETURNS: + * Zero for success or -errno. + */ +int drm_atomic_helper_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock) +{ + int ret; + + if (state->async_update) { + ret = drm_atomic_helper_prepare_planes(dev, state); + if (ret) + return ret; + + drm_atomic_helper_async_commit(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); + + return 0; + } + + ret = drm_atomic_helper_setup_commit(state, nonblock); + if (ret) + return ret; + + INIT_WORK(&state->commit_work, commit_work); + + ret = drm_atomic_helper_prepare_planes(dev, state); + if (ret) + return ret; + + if (!nonblock) { + ret = drm_atomic_helper_wait_for_fences(dev, state, true); + if (ret) + goto err; + } + + /* + * This is the point of no return - everything below never fails except + * when the hw goes bonghits. Which means we can commit the new state on + * the software side now. + */ + + ret = drm_atomic_helper_swap_state(state, true); + if (ret) + goto err; + + /* + * Everything below can be run asynchronously without the need to grab + * any modeset locks at all under one condition: It must be guaranteed + * that the asynchronous work has either been cancelled (if the driver + * supports it, which at least requires that the framebuffers get + * cleaned up with drm_atomic_helper_cleanup_planes()) or completed + * before the new state gets committed on the software side with + * drm_atomic_helper_swap_state(). 
+	 *
+	 * This scheme allows new atomic state updates to be prepared and
+	 * checked in parallel to the asynchronous completion of the previous
+	 * update. Which is important since compositors need to figure out the
+	 * composition of the next frame right after having submitted the
+	 * current layout.
+	 *
+	 * NOTE: Commit work has multiple phases, first hardware commit, then
+	 * cleanup. We want them to overlap, hence need system_unbound_wq to
+	 * make sure work items don't artificially stall on each other.
+	 */
+
+	drm_atomic_state_get(state);
+	if (nonblock)
+		queue_work(system_unbound_wq, &state->commit_work);
+	else
+		commit_tail(state);
+
+	return 0;
+
+err:
+	drm_atomic_helper_cleanup_planes(dev, state);
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit);
+
+/**
+ * DOC: implementing nonblocking commit
+ *
+ * Nonblocking atomic commits have to be implemented in the following sequence:
+ *
+ * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
+ * which commit needs to call which can fail, so we want to run it first and
+ * synchronously.
+ *
+ * 2. Synchronize with any outstanding nonblocking commit worker threads which
+ * might be affected by the new state update. This can be done by either
+ * cancelling or flushing the work items, depending upon whether the driver can
+ * deal with cancelled updates. Note that it is important to ensure that the
+ * framebuffer cleanup is still done when cancelling.
+ *
+ * Asynchronous workers need to have sufficient parallelism to be able to run
+ * different atomic commits on different CRTCs in parallel. The simplest way to
+ * achieve this is by running them on the &system_unbound_wq work queue. Note
+ * that drivers are not required to split up atomic commits and run an
+ * individual commit in parallel - userspace is supposed to do that if it cares.
+ * But it might be beneficial to do that for modesets, since those necessarily
+ * must be done as one global operation, and enabling or disabling a CRTC can
+ * take a long time. But even that is not required.
+ *
+ * 3. The software state is updated synchronously with
+ * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
+ * locks means concurrent callers never see inconsistent state. And doing this
+ * while it's guaranteed that no relevant nonblocking worker runs means that
+ * nonblocking workers do not need to grab any locks. Actually they must not
+ * grab locks, for otherwise the work flushing will deadlock.
+ *
+ * 4. Schedule a work item to do all subsequent steps, using the split-out
+ * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
+ * then cleaning up the framebuffers after the old framebuffer is no longer
+ * being displayed.
+ *
+ * The above scheme is implemented in the atomic helper libraries in
+ * drm_atomic_helper_commit() using a bunch of helper functions. See
+ * drm_atomic_helper_setup_commit() for a starting point.
+ */
+
+static int stall_checks(struct drm_crtc *crtc, bool nonblock)
+{
+	struct drm_crtc_commit *commit, *stall_commit = NULL;
+	bool completed = true;
+	int i;
+	long ret = 0;
+
+	spin_lock(&crtc->commit_lock);
+	i = 0;
+	list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
+		if (i == 0) {
+			completed = try_wait_for_completion(&commit->flip_done);
+			/* Userspace is not allowed to get ahead of the previous
+			 * commit with nonblocking ones. */
+			if (!completed && nonblock) {
+				spin_unlock(&crtc->commit_lock);
+				return -EBUSY;
+			}
+		} else if (i == 1) {
+			stall_commit = drm_crtc_commit_get(commit);
+			break;
+		}
+
+		i++;
+	}
+	spin_unlock(&crtc->commit_lock);
+
+	if (!stall_commit)
+		return 0;
+
+	/* We don't want to let commits get ahead of cleanup work too much,
+	 * stalling on 2nd previous commit means triple-buffer won't ever stall.
+	 */
+	ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
+							10*HZ);
+	if (ret == 0)
+		DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
+			  crtc->base.id, crtc->name);
+
+	drm_crtc_commit_put(stall_commit);
+
+	return ret < 0 ? ret : 0;
+}
+
+static void release_crtc_commit(struct completion *completion)
+{
+	struct drm_crtc_commit *commit = container_of(completion,
+						      typeof(*commit),
+						      flip_done);
+
+	drm_crtc_commit_put(commit);
+}
+
+static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
+{
+	init_completion(&commit->flip_done);
+	init_completion(&commit->hw_done);
+	init_completion(&commit->cleanup_done);
+	INIT_LIST_HEAD(&commit->commit_entry);
+	kref_init(&commit->ref);
+	commit->crtc = crtc;
+}
+
+static struct drm_crtc_commit *
+crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+	if (crtc) {
+		struct drm_crtc_state *new_crtc_state;
+
+		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+		return new_crtc_state->commit;
+	}
+
+	if (!state->fake_commit) {
+		state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
+		if (!state->fake_commit)
+			return NULL;
+
+		init_commit(state->fake_commit, NULL);
+	}
+
+	return state->fake_commit;
+}
+
+/**
+ * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
+ * @state: new modeset state to be committed
+ * @nonblock: whether nonblocking behavior is requested.
+ *
+ * This function prepares @state to be used by the atomic helper's support for
+ * nonblocking commits. Drivers using the nonblocking commit infrastructure
+ * should always call this function from their
+ * &drm_mode_config_funcs.atomic_commit hook.
+ *
+ * To be able to use this support drivers need to use a few more helper
+ * functions. drm_atomic_helper_wait_for_dependencies() must be called before
+ * actually committing the hardware state, and for nonblocking commits this call
+ * must be placed in the async worker. See also drm_atomic_helper_swap_state()
+ * and its stall parameter, for when a driver's commit hooks look at the
+ * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
+ *
+ * Completion of the hardware commit step must be signalled using
+ * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
+ * to read or change any permanent software or hardware modeset state. The only
+ * exception is state protected by other means than &drm_modeset_lock locks.
+ * Only the free standing @state with pointers to the old state structures can
+ * be inspected, e.g. to clean up old buffers using
+ * drm_atomic_helper_cleanup_planes().
+ *
+ * At the very end, before cleaning up @state drivers must call
+ * drm_atomic_helper_commit_cleanup_done().
+ *
+ * This is all implemented in drm_atomic_helper_commit(), giving drivers a
+ * complete and easy-to-use default implementation of the atomic_commit() hook.
+ *
+ * The tracking of asynchronously executed and still pending commits is done
+ * using the core structure &drm_crtc_commit.
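+ *
+ * For drivers that just want the stock behaviour, the usual wiring is simply
+ * the following sketch (foo_ names are hypothetical)::
+ *
+ *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
+ *             .atomic_check  = drm_atomic_helper_check,
+ *             .atomic_commit = drm_atomic_helper_commit,
+ *     };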
+ * + * By default there's no need to clean up resources allocated by this function + * explicitly: drm_atomic_state_default_clear() will take care of that + * automatically. + * + * Returns: + * + * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast, + * -ENOMEM on allocation failures and -EINTR when a signal is pending. + */ +int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, + bool nonblock) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_connector *conn; + struct drm_connector_state *old_conn_state, *new_conn_state; + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_crtc_commit *commit; + int i, ret; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + commit = kzalloc(sizeof(*commit), GFP_KERNEL); + if (!commit) + return -ENOMEM; + + init_commit(commit, crtc); + + new_crtc_state->commit = commit; + + ret = stall_checks(crtc, nonblock); + if (ret) + return ret; + + /* Drivers only send out events when at least either current or + * new CRTC state is active. Complete right away if everything + * stays off. */ + if (!old_crtc_state->active && !new_crtc_state->active) { + complete_all(&commit->flip_done); + continue; + } + + /* Legacy cursor updates are fully unsynced. */ + if (state->legacy_cursor_update) { + complete_all(&commit->flip_done); + continue; + } + + if (!new_crtc_state->event) { + commit->event = kzalloc(sizeof(*commit->event), + GFP_KERNEL); + if (!commit->event) + return -ENOMEM; + + new_crtc_state->event = commit->event; + } + + new_crtc_state->event->base.completion = &commit->flip_done; + new_crtc_state->event->base.completion_release = release_crtc_commit; + drm_crtc_commit_get(commit); + + commit->abort_completion = true; + + state->crtcs[i].commit = commit; + drm_crtc_commit_get(commit); + } + + for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { + /* Userspace is not allowed to get ahead of the previous + * commit with nonblocking ones. */ + if (nonblock && old_conn_state->commit && + !try_wait_for_completion(&old_conn_state->commit->flip_done)) + return -EBUSY; + + /* Always track connectors explicitly for e.g. link retraining. */ + commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc); + if (!commit) + return -ENOMEM; + + new_conn_state->commit = drm_crtc_commit_get(commit); + } + + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + /* Userspace is not allowed to get ahead of the previous + * commit with nonblocking ones. */ + if (nonblock && old_plane_state->commit && + !try_wait_for_completion(&old_plane_state->commit->flip_done)) + return -EBUSY; + + /* Always track planes explicitly for async pageflip support. 
*/
+		commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
+		if (!commit)
+			return -ENOMEM;
+
+		new_plane_state->commit = drm_crtc_commit_get(commit);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
+
+/**
+ * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
+ * @old_state: atomic state object with old state structures
+ *
+ * This function waits for all preceding commits that touch the same CRTC as
+ * @old_state to both be committed to the hardware (as signalled by
+ * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
+ * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state;
+	struct drm_connector *conn;
+	struct drm_connector_state *old_conn_state;
+	struct drm_crtc_commit *commit;
+	int i;
+	long ret;
+
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		commit = old_crtc_state->commit;
+
+		if (!commit)
+			continue;
+
+		ret = wait_for_completion_timeout(&commit->hw_done,
+						  10*HZ);
+		if (ret == 0)
+			DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
+				  crtc->base.id, crtc->name);
+
+		/* Currently no support for overwriting flips, hence
+		 * stall for previous one to execute completely. */
+		ret = wait_for_completion_timeout(&commit->flip_done,
+						  10*HZ);
+		if (ret == 0)
+			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+				  crtc->base.id, crtc->name);
+	}
+
+	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+		commit = old_conn_state->commit;
+
+		if (!commit)
+			continue;
+
+		ret = wait_for_completion_timeout(&commit->hw_done,
+						  10*HZ);
+		if (ret == 0)
+			DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
+				  conn->base.id, conn->name);
+
+		/* Currently no support for overwriting flips, hence
+		 * stall for previous one to execute completely. */
+		ret = wait_for_completion_timeout(&commit->flip_done,
+						  10*HZ);
+		if (ret == 0)
+			DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
+				  conn->base.id, conn->name);
+	}
+
+	for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+		commit = old_plane_state->commit;
+
+		if (!commit)
+			continue;
+
+		ret = wait_for_completion_timeout(&commit->hw_done,
+						  10*HZ);
+		if (ret == 0)
+			DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
+				  plane->base.id, plane->name);
+
+		/* Currently no support for overwriting flips, hence
+		 * stall for previous one to execute completely. */
+		ret = wait_for_completion_timeout(&commit->flip_done,
+						  10*HZ);
+		if (ret == 0)
+			DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
+				  plane->base.id, plane->name);
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
+
+/**
+ * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
+ * @old_state: atomic state object with old state structures
+ *
+ * This function walks all CRTCs and fakes VBLANK events on those with
+ * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
+ * The primary use of this function is writeback connectors working in oneshot
+ * mode, which only fake the VBLANK event when a job is queued; any change to
+ * the pipeline that does not touch the connector would otherwise lead to
+ * timeouts when calling drm_atomic_helper_wait_for_vblanks() or
+ * drm_atomic_helper_wait_for_flip_done().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+{
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_crtc *crtc;
+	int i;
+
+	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+		unsigned long flags;
+
+		if (!new_crtc_state->no_vblank)
+			continue;
+
+		spin_lock_irqsave(&old_state->dev->event_lock, flags);
+		if (new_crtc_state->event) {
+			drm_crtc_send_vblank_event(crtc,
+						   new_crtc_state->event);
+			new_crtc_state->event = NULL;
+		}
+		spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
+
+/**
+ * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
+ * @old_state: atomic state object with old state structures
+ *
+ * This function is used to signal completion of the hardware commit step. After
+ * this step the driver is not allowed to read or change any permanent software
+ * or hardware modeset state. The only exception is state protected by other
+ * means than &drm_modeset_lock locks.
+ *
+ * Drivers should try to postpone any expensive or delayed cleanup work after
+ * this function is called.
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_crtc_commit *commit;
+	int i;
+
+	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+		commit = new_crtc_state->commit;
+		if (!commit)
+			continue;
+
+		/*
+		 * copy new_crtc_state->commit to old_crtc_state->commit,
+		 * it's unsafe to touch new_crtc_state after hw_done,
+		 * but we still need to do so in cleanup_done().
+		 */
+		if (old_crtc_state->commit)
+			drm_crtc_commit_put(old_crtc_state->commit);
+
+		old_crtc_state->commit = drm_crtc_commit_get(commit);
+
+		/* backend must have consumed any event by now */
+		WARN_ON(new_crtc_state->event);
+		complete_all(&commit->hw_done);
+	}
+
+	if (old_state->fake_commit) {
+		complete_all(&old_state->fake_commit->hw_done);
+		complete_all(&old_state->fake_commit->flip_done);
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
+
+/**
+ * drm_atomic_helper_commit_cleanup_done - signal completion of commit
+ * @old_state: atomic state object with old state structures
+ *
+ * This signals completion of the atomic update @old_state, including any
+ * cleanup work. If used, it must be called right before calling
+ * drm_atomic_state_put().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
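+ *
+ * As a sketch, a hand-rolled commit tail worker would bracket its work with
+ * the completion helpers roughly like this (error paths elided, dev being
+ * the &drm_device)::
+ *
+ *     drm_atomic_helper_wait_for_dependencies(old_state);
+ *     // ... program the hardware for the new state ...
+ *     drm_atomic_helper_commit_hw_done(old_state);
+ *     // ... wait for flips, then clean up, e.g.:
+ *     drm_atomic_helper_cleanup_planes(dev, old_state);
+ *     drm_atomic_helper_commit_cleanup_done(old_state);
+ *     drm_atomic_state_put(old_state);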
+ */ +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct drm_crtc_commit *commit; + int i; + + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { + commit = old_crtc_state->commit; + if (WARN_ON(!commit)) + continue; + + complete_all(&commit->cleanup_done); + WARN_ON(!try_wait_for_completion(&commit->hw_done)); + + spin_lock(&crtc->commit_lock); + list_del(&commit->commit_entry); + spin_unlock(&crtc->commit_lock); + } + + if (old_state->fake_commit) { + complete_all(&old_state->fake_commit->cleanup_done); + WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done)); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); + +/** + * drm_atomic_helper_prepare_planes - prepare plane resources before commit + * @dev: DRM device + * @state: atomic state object with new state structures + * + * This function prepares plane state, specifically framebuffers, for the new + * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure + * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on + * any already successfully prepared framebuffer. + * + * Returns: + * 0 on success, negative error code on failure. + */ +int drm_atomic_helper_prepare_planes(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; + struct drm_plane *plane; + struct drm_plane_state *new_plane_state; + int ret, i, j; + + for_each_new_connector_in_state(state, connector, new_conn_state, i) { + if (!new_conn_state->writeback_job) + continue; + + ret = drm_writeback_prepare_job(new_conn_state->writeback_job); + if (ret < 0) + return ret; + } + + for_each_new_plane_in_state(state, plane, new_plane_state, i) { + const struct drm_plane_helper_funcs *funcs; + + funcs = plane->helper_private; + + if (funcs->prepare_fb) { + ret = funcs->prepare_fb(plane, new_plane_state); + if (ret) + goto fail; + } + } + + return 0; + +fail: + for_each_new_plane_in_state(state, plane, new_plane_state, j) { + const struct drm_plane_helper_funcs *funcs; + + if (j >= i) + continue; + + funcs = plane->helper_private; + + if (funcs->cleanup_fb) + funcs->cleanup_fb(plane, new_plane_state); + } + + return ret; +} +EXPORT_SYMBOL(drm_atomic_helper_prepare_planes); + +static bool plane_crtc_active(const struct drm_plane_state *state) +{ + return state->crtc && state->crtc->state->active; +} + +/** + * drm_atomic_helper_commit_planes - commit plane state + * @dev: DRM device + * @old_state: atomic state object with old state structures + * @flags: flags for committing plane state + * + * This function commits the new plane state using the plane and atomic helper + * functions for planes and crtcs. It assumes that the atomic state has already + * been pushed into the relevant object state pointers, since this step can no + * longer fail. + * + * It still requires the global state object @old_state to know which planes and + * crtcs need to be updated though. + * + * Note that this function does all plane updates across all CRTCs in one step. + * If the hardware can't support this approach look at + * drm_atomic_helper_commit_planes_on_crtc() instead. + * + * Plane parameters can be updated by applications while the associated CRTC is + * disabled. The DRM/KMS core will store the parameters in the plane state, + * which will be available to the driver when the CRTC is turned on. 
As a result
+ * most drivers don't need to be immediately notified of plane updates for a
+ * disabled CRTC.
+ *
+ * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
+ * @flags in order not to receive plane update notifications related to a
+ * disabled CRTC. This avoids the need to manually ignore plane updates in
+ * driver code when the driver and/or hardware can't or just don't need to deal
+ * with updates on disabled CRTCs, for example when supporting runtime PM.
+ *
+ * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
+ * display controllers need to disable a CRTC's planes when the CRTC is
+ * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
+ * call for a plane if the CRTC of the old plane state needs a modesetting
+ * operation. Of course, the drivers need to disable the planes in their CRTC
+ * disable callbacks since no one else would do that.
+ *
+ * The drm_atomic_helper_commit() default implementation doesn't set the
+ * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
+ * This should not be copied blindly by drivers.
+ */
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+				     struct drm_atomic_state *old_state,
+				     uint32_t flags)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	int i;
+	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
+	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
+
+	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		funcs = crtc->helper_private;
+
+		if (!funcs || !funcs->atomic_begin)
+			continue;
+
+		if (active_only && !new_crtc_state->active)
+			continue;
+
+		funcs->atomic_begin(crtc, old_crtc_state);
+	}
+
+	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+		const struct drm_plane_helper_funcs *funcs;
+		bool disabling;
+
+		funcs = plane->helper_private;
+
+		if (!funcs)
+			continue;
+
+		disabling = drm_atomic_plane_disabling(old_plane_state,
+						       new_plane_state);
+
+		if (active_only) {
+			/*
+			 * Skip planes related to inactive CRTCs. If the plane
+			 * is enabled use the state of the current CRTC. If the
+			 * plane is being disabled use the state of the old
+			 * CRTC to avoid skipping planes being disabled on an
+			 * active CRTC.
+			 */
+			if (!disabling && !plane_crtc_active(new_plane_state))
+				continue;
+			if (disabling && !plane_crtc_active(old_plane_state))
+				continue;
+		}
+
+		/*
+		 * Special-case disabling the plane if drivers support it.
+		 */
+		if (disabling && funcs->atomic_disable) {
+			struct drm_crtc_state *crtc_state;
+
+			crtc_state = old_plane_state->crtc->state;
+
+			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+			    no_disable)
+				continue;
+
+			funcs->atomic_disable(plane, old_plane_state);
+		} else if (new_plane_state->crtc || disabling) {
+			funcs->atomic_update(plane, old_plane_state);
+		}
+	}
+
+	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		funcs = crtc->helper_private;
+
+		if (!funcs || !funcs->atomic_flush)
+			continue;
+
+		if (active_only && !new_crtc_state->active)
+			continue;
+
+		funcs->atomic_flush(crtc, old_crtc_state);
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
+
+/**
+ * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
+ * @old_crtc_state: atomic state object with the old crtc state
+ *
+ * This function commits the new plane state using the plane and atomic helper
+ * functions for planes on the specific crtc. It assumes that the atomic state
+ * has already been pushed into the relevant object state pointers, since this
+ * step can no longer fail.
+ *
+ * This function is useful when plane updates should be done crtc-by-crtc
+ * instead of one global step like drm_atomic_helper_commit_planes() does.
+ *
+ * This function can only be safely used when planes are not allowed to move
+ * between different CRTCs because this function doesn't handle inter-CRTC
+ * dependencies. Callers need to ensure that either no such dependencies exist,
+ * resolve them through ordering of commit calls or through some other means.
+ */
+void
+drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
+{
+	const struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_crtc *crtc = old_crtc_state->crtc;
+	struct drm_atomic_state *old_state = old_crtc_state->state;
+	struct drm_crtc_state *new_crtc_state =
+		drm_atomic_get_new_crtc_state(old_state, crtc);
+	struct drm_plane *plane;
+	unsigned plane_mask;
+
+	plane_mask = old_crtc_state->plane_mask;
+	plane_mask |= new_crtc_state->plane_mask;
+
+	crtc_funcs = crtc->helper_private;
+	if (crtc_funcs && crtc_funcs->atomic_begin)
+		crtc_funcs->atomic_begin(crtc, old_crtc_state);
+
+	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
+		struct drm_plane_state *old_plane_state =
+			drm_atomic_get_old_plane_state(old_state, plane);
+		struct drm_plane_state *new_plane_state =
+			drm_atomic_get_new_plane_state(old_state, plane);
+		const struct drm_plane_helper_funcs *plane_funcs;
+
+		plane_funcs = plane->helper_private;
+
+		if (!old_plane_state || !plane_funcs)
+			continue;
+
+		WARN_ON(new_plane_state->crtc &&
+			new_plane_state->crtc != crtc);
+
+		if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
+		    plane_funcs->atomic_disable)
+			plane_funcs->atomic_disable(plane, old_plane_state);
+		else if (new_plane_state->crtc ||
+			 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
+			plane_funcs->atomic_update(plane, old_plane_state);
+	}
+
+	if (crtc_funcs && crtc_funcs->atomic_flush)
+		crtc_funcs->atomic_flush(crtc, old_crtc_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
+
+/**
+ * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
+ * @old_crtc_state: atomic state object with the old CRTC state
+ * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
+ *
+ * Disables all planes associated with the given CRTC. This can be
+ * used for instance in the CRTC helper atomic_disable callback to disable
+ * all planes.
+ *
+ * If the atomic-parameter is set the function calls the CRTC's
+ * atomic_begin hook before and atomic_flush hook after disabling the
+ * planes.
+ *
+ * It is a bug to call this function without having implemented the
+ * &drm_plane_helper_funcs.atomic_disable plane hook.
+ */
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+					 bool atomic)
+{
+	struct drm_crtc *crtc = old_crtc_state->crtc;
+	const struct drm_crtc_helper_funcs *crtc_funcs =
+		crtc->helper_private;
+	struct drm_plane *plane;
+
+	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
+		crtc_funcs->atomic_begin(crtc, NULL);
+
+	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
+		const struct drm_plane_helper_funcs *plane_funcs =
+			plane->helper_private;
+
+		if (!plane_funcs)
+			continue;
+
+		WARN_ON(!plane_funcs->atomic_disable);
+		if (plane_funcs->atomic_disable)
+			plane_funcs->atomic_disable(plane, NULL);
+	}
+
+	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
+		crtc_funcs->atomic_flush(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
+
+/**
+ * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function cleans up plane state, specifically framebuffers, from the old
+ * configuration. Hence the old configuration must be preserved in @old_state to
+ * be able to call this function.
+ *
+ * This function must also be called on the new state when the atomic update
+ * fails at any point after calling drm_atomic_helper_prepare_planes().
+ */
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+				      struct drm_atomic_state *old_state)
+{
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	int i;
+
+	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+		const struct drm_plane_helper_funcs *funcs;
+		struct drm_plane_state *plane_state;
+
+		/*
+		 * This might be called before swapping when commit is aborted,
+		 * in which case we have to cleanup the new state.
+		 */
+		if (old_plane_state == plane->state)
+			plane_state = new_plane_state;
+		else
+			plane_state = old_plane_state;
+
+		funcs = plane->helper_private;
+
+		if (funcs->cleanup_fb)
+			funcs->cleanup_fb(plane, plane_state);
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
+
+/**
+ * drm_atomic_helper_swap_state - store atomic state into current sw state
+ * @state: atomic state
+ * @stall: stall for preceding commits
+ *
+ * This function stores the atomic state into the current state pointers in all
+ * driver objects. It should be called after all steps that could fail have been
+ * completed successfully, but before the actual hardware state is committed.
+ *
+ * For cleanup and error recovery the current state for all changed objects will
+ * be swapped into @state.
+ *
+ * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
+ *
+ * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
+ *
+ * 2. Do any other steps that might fail.
+ *
+ * 3. Put the staged state into the current state pointers with this function.
+ *
+ * 4. Actually commit the hardware state.
+ *
+ * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
+ * contains the old state. Also do any other cleanup required with that state.
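+ *
+ * As code, that sequence is roughly the following sketch (the hardware commit
+ * step is driver-specific and hypothetical here)::
+ *
+ *     ret = drm_atomic_helper_prepare_planes(dev, state);
+ *     if (ret)
+ *             return ret;
+ *     // ... other steps that may fail ...
+ *     ret = drm_atomic_helper_swap_state(state, true);    // step 3
+ *     if (ret == 0)
+ *             foo_commit_hw(dev, state);                  // step 4, hypothetical
+ *     drm_atomic_helper_cleanup_planes(dev, state);       // step 5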
+ * + * @stall must be set when nonblocking commits for this driver directly access + * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With + * the current atomic helpers this is almost always the case, since the helpers + * don't pass the right state structures to the callbacks. + * + * Returns: + * + * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the + * waiting for the previous commits has been interrupted. + */ +int drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall) +{ + int i, ret; + struct drm_connector *connector; + struct drm_connector_state *old_conn_state, *new_conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_crtc_commit *commit; + struct drm_private_obj *obj; + struct drm_private_state *old_obj_state, *new_obj_state; + + if (stall) { + /* + * We have to stall for hw_done here before + * drm_atomic_helper_wait_for_dependencies() because flip + * depth > 1 is not yet supported by all drivers. As long as + * obj->state is directly dereferenced anywhere in the drivers + * atomic_commit_tail function, then it's unsafe to swap state + * before drm_atomic_helper_commit_hw_done() is called. + */ + + for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { + commit = old_crtc_state->commit; + + if (!commit) + continue; + + ret = wait_for_completion_interruptible(&commit->hw_done); + if (ret) + return ret; + } + + for_each_old_connector_in_state(state, connector, old_conn_state, i) { + commit = old_conn_state->commit; + + if (!commit) + continue; + + ret = wait_for_completion_interruptible(&commit->hw_done); + if (ret) + return ret; + } + + for_each_old_plane_in_state(state, plane, old_plane_state, i) { + commit = old_plane_state->commit; + + if (!commit) + continue; + + ret = wait_for_completion_interruptible(&commit->hw_done); + if (ret) + return ret; + } + } + + for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) { + WARN_ON(connector->state != old_conn_state); + + old_conn_state->state = state; + new_conn_state->state = NULL; + + state->connectors[i].state = old_conn_state; + connector->state = new_conn_state; + } + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + WARN_ON(crtc->state != old_crtc_state); + + old_crtc_state->state = state; + new_crtc_state->state = NULL; + + state->crtcs[i].state = old_crtc_state; + crtc->state = new_crtc_state; + + if (new_crtc_state->commit) { + spin_lock(&crtc->commit_lock); + list_add(&new_crtc_state->commit->commit_entry, + &crtc->commit_list); + spin_unlock(&crtc->commit_lock); + + new_crtc_state->commit->event = NULL; + } + } + + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + WARN_ON(plane->state != old_plane_state); + + old_plane_state->state = state; + new_plane_state->state = NULL; + + state->planes[i].state = old_plane_state; + plane->state = new_plane_state; + } + + for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) { + WARN_ON(obj->state != old_obj_state); + + old_obj_state->state = state; + new_obj_state->state = NULL; + + state->private_objs[i].state = old_obj_state; + obj->state = new_obj_state; + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_helper_swap_state); + +/** + * drm_atomic_helper_update_plane - Helper for primary plane update using atomic + * @plane: plane object to 
update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context
+ *
+ * Provides a default plane update handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+				   struct drm_crtc *crtc,
+				   struct drm_framebuffer *fb,
+				   int crtc_x, int crtc_y,
+				   unsigned int crtc_w, unsigned int crtc_h,
+				   uint32_t src_x, uint32_t src_y,
+				   uint32_t src_w, uint32_t src_h,
+				   struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_plane_state *plane_state;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+	plane_state = drm_atomic_get_plane_state(state, plane);
+	if (IS_ERR(plane_state)) {
+		ret = PTR_ERR(plane_state);
+		goto fail;
+	}
+
+	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+	if (ret != 0)
+		goto fail;
+	drm_atomic_set_fb_for_plane(plane_state, fb);
+	plane_state->crtc_x = crtc_x;
+	plane_state->crtc_y = crtc_y;
+	plane_state->crtc_w = crtc_w;
+	plane_state->crtc_h = crtc_h;
+	plane_state->src_x = src_x;
+	plane_state->src_y = src_y;
+	plane_state->src_w = src_w;
+	plane_state->src_h = src_h;
+
+	if (plane == crtc->cursor)
+		state->legacy_cursor_update = true;
+
+	ret = drm_atomic_commit(state);
+fail:
+	drm_atomic_state_put(state);
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_update_plane);
+
+/**
+ * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
+ * @plane: plane to disable
+ * @ctx: lock acquire context
+ *
+ * Provides a default plane disable handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_helper_disable_plane(struct drm_plane *plane,
+				    struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_plane_state *plane_state;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+	plane_state = drm_atomic_get_plane_state(state, plane);
+	if (IS_ERR(plane_state)) {
+		ret = PTR_ERR(plane_state);
+		goto fail;
+	}
+
+	if (plane_state->crtc && plane_state->crtc->cursor == plane)
+		plane_state->state->legacy_cursor_update = true;
+
+	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+	if (ret != 0)
+		goto fail;
+
+	ret = drm_atomic_commit(state);
+fail:
+	drm_atomic_state_put(state);
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
+
+/**
+ * drm_atomic_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ * @ctx: lock acquisition context
+ *
+ * Provides a default crtc set_config handler using the atomic driver interface.
+ *
+ * NOTE: For backwards compatibility with old userspace this automatically
+ * resets the "link-status" property to GOOD, to force any link
+ * re-training. The SETCRTC ioctl does not define whether an update does
+ * need a full modeset or just a plane update, hence we're allowed to do
+ * that. See also drm_connector_set_link_status_property().
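+ *
+ * Together with the other legacy entry points this is typically wired into
+ * &drm_crtc_funcs as follows (sketch)::
+ *
+ *     static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *             .set_config = drm_atomic_helper_set_config,
+ *             .page_flip  = drm_atomic_helper_page_flip,
+ *             // ...
+ *     };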
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_set_config(struct drm_mode_set *set,
+				 struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_crtc *crtc = set->crtc;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+	ret = __drm_atomic_helper_set_config(set, state);
+	if (ret != 0)
+		goto fail;
+
+	ret = handle_conflicting_encoders(state, true);
+	if (ret)
+		goto fail;
+
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_atomic_state_put(state);
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_set_config);
+
+/**
+ * drm_atomic_helper_disable_all - disable all currently active outputs
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Loops through all connectors, finding those that aren't turned off and then
+ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
+ * that they are connected to.
+ *
+ * This is used for example in suspend/resume to disable all currently active
+ * functions when suspending. If you just want to shut down everything at e.g.
+ * driver unload, look at drm_atomic_helper_shutdown().
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
+ * drm_atomic_helper_shutdown().
+ */
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_connector_state *conn_state;
+	struct drm_connector *conn;
+	struct drm_plane_state *plane_state;
+	struct drm_plane *plane;
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int ret, i;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+
+	drm_for_each_crtc(crtc, dev) {
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			ret = PTR_ERR(crtc_state);
+			goto free;
+		}
+
+		crtc_state->active = false;
+
+		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
+		if (ret < 0)
+			goto free;
+
+		ret = drm_atomic_add_affected_planes(state, crtc);
+		if (ret < 0)
+			goto free;
+
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret < 0)
+			goto free;
+	}
+
+	for_each_new_connector_in_state(state, conn, conn_state, i) {
+		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+		if (ret < 0)
+			goto free;
+	}
+
+	for_each_new_plane_in_state(state, plane, plane_state, i) {
+		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+		if (ret < 0)
+			goto free;
+
+		drm_atomic_set_fb_for_plane(plane_state, NULL);
+	}
+
+	ret = drm_atomic_commit(state);
+free:
+	drm_atomic_state_put(state);
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_all);
+
+/**
+ * drm_atomic_helper_shutdown - shutdown all CRTCs
+ * @dev: DRM device
+ *
+ * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
+ * suspend should instead be handled with drm_atomic_helper_suspend(), since
+ * that also takes a snapshot of the modeset state to be restored on resume.
+ *
+ * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
+ * and it is the atomic version of drm_crtc_force_disable_all().
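+ *
+ * A common unload sequence, sketched::
+ *
+ *     drm_dev_unregister(drm);
+ *     drm_atomic_helper_shutdown(drm);
+ *     // ... free driver-private resources, then drop the last reference:
+ *     drm_dev_put(drm);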
+ */ +void drm_atomic_helper_shutdown(struct drm_device *dev) +{ + struct drm_modeset_acquire_ctx ctx; + int ret; + + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); + + ret = drm_atomic_helper_disable_all(dev, &ctx); + if (ret) + DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); + + DRM_MODESET_LOCK_ALL_END(ctx, ret); +} +EXPORT_SYMBOL(drm_atomic_helper_shutdown); + +/** + * drm_atomic_helper_duplicate_state - duplicate an atomic state object + * @dev: DRM device + * @ctx: lock acquisition context + * + * Makes a copy of the current atomic state by looping over all objects and + * duplicating their respective states. This is used for example by suspend/ + * resume support code to save the state prior to suspend such that it can + * be restored upon resume. + * + * Note that this treats atomic state as persistent between save and restore. + * Drivers must make sure that this is possible and won't result in confusion + * or erroneous behaviour. + * + * Note that if callers haven't already acquired all modeset locks this might + * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). + * + * Returns: + * A pointer to the copy of the atomic state object on success or an + * ERR_PTR()-encoded error code on failure. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() + */ +struct drm_atomic_state * +drm_atomic_helper_duplicate_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_connector *conn; + struct drm_connector_list_iter conn_iter; + struct drm_plane *plane; + struct drm_crtc *crtc; + int err = 0; + + state = drm_atomic_state_alloc(dev); + if (!state) + return ERR_PTR(-ENOMEM); + + state->acquire_ctx = ctx; + state->duplicated = true; + + drm_for_each_crtc(crtc, dev) { + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + err = PTR_ERR(crtc_state); + goto free; + } + } + + drm_for_each_plane(plane, dev) { + struct drm_plane_state *plane_state; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + err = PTR_ERR(plane_state); + goto free; + } + } + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(conn, &conn_iter) { + struct drm_connector_state *conn_state; + + conn_state = drm_atomic_get_connector_state(state, conn); + if (IS_ERR(conn_state)) { + err = PTR_ERR(conn_state); + drm_connector_list_iter_end(&conn_iter); + goto free; + } + } + drm_connector_list_iter_end(&conn_iter); + + /* clear the acquire context so that it isn't accidentally reused */ + state->acquire_ctx = NULL; + +free: + if (err < 0) { + drm_atomic_state_put(state); + state = ERR_PTR(err); + } + + return state; +} +EXPORT_SYMBOL(drm_atomic_helper_duplicate_state); + +/** + * drm_atomic_helper_suspend - subsystem-level suspend helper + * @dev: DRM device + * + * Duplicates the current atomic state, disables all active outputs and then + * returns a pointer to the original atomic state to the caller. Drivers can + * pass this pointer to the drm_atomic_helper_resume() helper upon resume to + * restore the output configuration that was active at the time the system + * entered suspend. + * + * Note that it is potentially unsafe to use this. The atomic state object + * returned by this function is assumed to be persistent. Drivers must ensure + * that this holds true. 
Before calling this function, drivers must make sure + * to suspend fbdev emulation so that nothing can be using the device. + * + * Returns: + * A pointer to a copy of the state before suspend on success or an ERR_PTR()- + * encoded error code on failure. Drivers should store the returned atomic + * state object and pass it to the drm_atomic_helper_resume() helper upon + * resume. + * + * See also: + * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(), + * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state() + */ +struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *state; + int err; + + /* This can never be returned, but it makes the compiler happy */ + state = ERR_PTR(-EINVAL); + + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err); + + state = drm_atomic_helper_duplicate_state(dev, &ctx); + if (IS_ERR(state)) + goto unlock; + + err = drm_atomic_helper_disable_all(dev, &ctx); + if (err < 0) { + drm_atomic_state_put(state); + state = ERR_PTR(err); + goto unlock; + } + +unlock: + DRM_MODESET_LOCK_ALL_END(ctx, err); + if (err) + return ERR_PTR(err); + + return state; +} +EXPORT_SYMBOL(drm_atomic_helper_suspend); + +/** + * drm_atomic_helper_commit_duplicated_state - commit duplicated state + * @state: duplicated atomic state to commit + * @ctx: pointer to acquire_ctx to use for commit. + * + * The state returned by drm_atomic_helper_duplicate_state() and + * drm_atomic_helper_suspend() is partially invalid, and needs to + * be fixed up before commit. + * + * Returns: + * 0 on success or a negative error code on failure. + * + * See also: + * drm_atomic_helper_suspend() + */ +int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, + struct drm_modeset_acquire_ctx *ctx) +{ + int i, ret; + struct drm_plane *plane; + struct drm_plane_state *new_plane_state; + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; + + state->acquire_ctx = ctx; + + for_each_new_plane_in_state(state, plane, new_plane_state, i) + state->planes[i].old_state = plane->state; + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) + state->crtcs[i].old_state = crtc->state; + + for_each_new_connector_in_state(state, connector, new_conn_state, i) + state->connectors[i].old_state = connector->state; + + ret = drm_atomic_commit(state); + + state->acquire_ctx = NULL; + + return ret; +} +EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); + +/** + * drm_atomic_helper_resume - subsystem-level resume helper + * @dev: DRM device + * @state: atomic state to resume to + * + * Calls drm_mode_config_reset() to synchronize hardware and software states, + * grabs all modeset locks and commits the atomic state object. This can be + * used in conjunction with the drm_atomic_helper_suspend() helper to + * implement suspend/resume for drivers that support atomic mode-setting. + * + * Returns: + * 0 on success or a negative error code on failure. 
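+ *
+ * A minimal suspend/resume pair using these helpers might look like (sketch;
+ * foo_priv and its members are hypothetical)::
+ *
+ *     static int foo_suspend(struct foo_priv *priv)
+ *     {
+ *             priv->saved_state = drm_atomic_helper_suspend(priv->drm);
+ *             return PTR_ERR_OR_ZERO(priv->saved_state);
+ *     }
+ *
+ *     static int foo_resume(struct foo_priv *priv)
+ *     {
+ *             return drm_atomic_helper_resume(priv->drm, priv->saved_state);
+ *     }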
+ * + * See also: + * drm_atomic_helper_suspend() + */ +int drm_atomic_helper_resume(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_modeset_acquire_ctx ctx; + int err; + + drm_mode_config_reset(dev); + + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err); + + err = drm_atomic_helper_commit_duplicated_state(state, &ctx); + + DRM_MODESET_LOCK_ALL_END(ctx, err); + drm_atomic_state_put(state); + + return err; +} +EXPORT_SYMBOL(drm_atomic_helper_resume); + +static int page_flip_common(struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) +{ + struct drm_plane *plane = crtc->primary; + struct drm_plane_state *plane_state; + struct drm_crtc_state *crtc_state; + int ret = 0; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + crtc_state->event = event; + crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); + if (ret != 0) + return ret; + drm_atomic_set_fb_for_plane(plane_state, fb); + + /* Make sure we don't accidentally do a full modeset. */ + state->allow_modeset = false; + if (!crtc_state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + + return ret; +} + +/** + * drm_atomic_helper_page_flip - execute a legacy page flip + * @crtc: DRM crtc + * @fb: DRM framebuffer + * @event: optional DRM event to signal upon completion + * @flags: flip flags for non-vblank sync'ed updates + * @ctx: lock acquisition context + * + * Provides a default &drm_crtc_funcs.page_flip implementation + * using the atomic driver interface. + * + * Returns: + * Returns 0 on success, negative errno numbers on failure. + * + * See also: + * drm_atomic_helper_page_flip_target() + */ +int drm_atomic_helper_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_plane *plane = crtc->primary; + struct drm_atomic_state *state; + int ret = 0; + + state = drm_atomic_state_alloc(plane->dev); + if (!state) + return -ENOMEM; + + state->acquire_ctx = ctx; + + ret = page_flip_common(state, crtc, fb, event, flags); + if (ret != 0) + goto fail; + + ret = drm_atomic_nonblocking_commit(state); +fail: + drm_atomic_state_put(state); + return ret; +} +EXPORT_SYMBOL(drm_atomic_helper_page_flip); + +/** + * drm_atomic_helper_page_flip_target - do page flip on target vblank period. + * @crtc: DRM crtc + * @fb: DRM framebuffer + * @event: optional DRM event to signal upon completion + * @flags: flip flags for non-vblank sync'ed updates + * @target: specifying the target vblank period when the flip to take effect + * @ctx: lock acquisition context + * + * Provides a default &drm_crtc_funcs.page_flip_target implementation. + * Similar to drm_atomic_helper_page_flip() with extra parameter to specify + * target vblank period to flip. + * + * Returns: + * Returns 0 on success, negative errno numbers on failure. 
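+ *
+ * Drivers typically expose this, together with the legacy gamma helper below,
+ * through &drm_crtc_funcs (sketch)::
+ *
+ *     static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *             // ...
+ *             .page_flip_target = drm_atomic_helper_page_flip_target,
+ *             .gamma_set        = drm_atomic_helper_legacy_gamma_set,
+ *     };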
+ */
+int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
+				       struct drm_framebuffer *fb,
+				       struct drm_pending_vblank_event *event,
+				       uint32_t flags,
+				       uint32_t target,
+				       struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_plane *plane = crtc->primary;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+
+	ret = page_flip_common(state, crtc, fb, event, flags);
+	if (ret != 0)
+		goto fail;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	if (WARN_ON(!crtc_state)) {
+		ret = -EINVAL;
+		goto fail;
+	}
+	crtc_state->target_vblank = target;
+
+	ret = drm_atomic_nonblocking_commit(state);
+fail:
+	drm_atomic_state_put(state);
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
+
+/**
+ * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @size: size of the tables
+ * @ctx: lock acquire context
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that support color management through the DEGAMMA_LUT/GAMMA_LUT
+ * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
+ * how the atomic color management and gamma tables work.
+ */
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+				       u16 *red, u16 *green, u16 *blue,
+				       uint32_t size,
+				       struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct drm_property_blob *blob = NULL;
+	struct drm_color_lut *blob_data;
+	int i, ret = 0;
+	bool replaced;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state)
+		return -ENOMEM;
+
+	blob = drm_property_create_blob(dev,
+					sizeof(struct drm_color_lut) * size,
+					NULL);
+	if (IS_ERR(blob)) {
+		ret = PTR_ERR(blob);
+		blob = NULL;
+		goto fail;
+	}
+
+	/* Prepare GAMMA_LUT with the legacy values. */
+	blob_data = blob->data;
+	for (i = 0; i < size; i++) {
+		blob_data[i].red = red[i];
+		blob_data[i].green = green[i];
+		blob_data[i].blue = blue[i];
+	}
+
+	state->acquire_ctx = ctx;
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	/* Reset DEGAMMA_LUT and CTM properties. */
+	replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
+	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
+	crtc_state->color_mgmt_changed |= replaced;
+
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_atomic_state_put(state);
+	drm_property_blob_put(blob);
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
Index: sys/dev/drm/core/drm_atomic_state_helper.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_atomic_state_helper.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark
+ * Daniel Vetter
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_writeback.h>
+
+#include <linux/slab.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: atomic state reset and initialization
+ *
+ * Both the drm core and the atomic helpers assume that there is always the full
+ * and correct atomic software state for all connectors, CRTCs and planes
+ * available, which is a bit of a problem on driver load and also after system
+ * suspend. One way to solve this is to have a hardware state read-out
+ * infrastructure which reconstructs the full software state (e.g. the i915
+ * driver).
+ *
+ * The simpler solution is to just reset the software state to everything off,
+ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
+ * the atomic helpers provide default reset implementations for all hooks.
+ *
+ * On the upside the precise state tracking of atomic simplifies system suspend
+ * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
+ * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
+ * For other drivers the building blocks are split out, see the documentation
+ * for these functions.
+ */
+
+/**
+ * __drm_atomic_helper_crtc_reset - reset state on CRTC
+ * @crtc: drm CRTC
+ * @crtc_state: CRTC state to assign
+ *
+ * Initializes the newly allocated @crtc_state and assigns it to
+ * the &drm_crtc->state pointer of @crtc, usually required when
+ * initializing the drivers or when called from the &drm_crtc_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void
+__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
+			       struct drm_crtc_state *crtc_state)
+{
+	if (crtc_state)
+		crtc_state->crtc = crtc;
+
+	crtc->state = crtc_state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_reset);
+
+/**
+ * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
+ * @crtc: drm CRTC
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
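+ *
+ * Drivers that subclass the CRTC state implement their own reset hook on top
+ * of __drm_atomic_helper_crtc_reset() instead; a sketch with a hypothetical
+ * foo_crtc_state::
+ *
+ *     struct foo_crtc_state {
+ *             struct drm_crtc_state base;
+ *             u32 foo_param;
+ *     };
+ *
+ *     static void foo_crtc_reset(struct drm_crtc *crtc)
+ *     {
+ *             struct foo_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+ *
+ *             if (crtc->state)
+ *                     crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+ *             __drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
+ *     }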
+ */ +void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc) +{ + struct drm_crtc_state *crtc_state = + kzalloc(sizeof(*crtc->state), GFP_KERNEL); + + if (crtc->state) + crtc->funcs->atomic_destroy_state(crtc, crtc->state); + + __drm_atomic_helper_crtc_reset(crtc, crtc_state); +} +EXPORT_SYMBOL(drm_atomic_helper_crtc_reset); + +/** + * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state + * @crtc: CRTC object + * @state: atomic CRTC state + * + * Copies atomic state from a CRTC's current state and resets inferred values. + * This is useful for drivers that subclass the CRTC state. + */ +void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + memcpy(state, crtc->state, sizeof(*state)); + + if (state->mode_blob) + drm_property_blob_get(state->mode_blob); + if (state->degamma_lut) + drm_property_blob_get(state->degamma_lut); + if (state->ctm) + drm_property_blob_get(state->ctm); + if (state->gamma_lut) + drm_property_blob_get(state->gamma_lut); + state->mode_changed = false; + state->active_changed = false; + state->planes_changed = false; + state->connectors_changed = false; + state->color_mgmt_changed = false; + state->zpos_changed = false; + state->commit = NULL; + state->event = NULL; + state->async_flip = false; + + /* Self refresh should be canceled when a new update is available */ + state->active = drm_atomic_crtc_effectively_active(state); + state->self_refresh_active = false; +} +EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state); + +/** + * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook + * @crtc: drm CRTC + * + * Default CRTC state duplicate hook for drivers which don't have their own + * subclassed CRTC state structure. + */ +struct drm_crtc_state * +drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct drm_crtc_state *state; + + if (WARN_ON(!crtc->state)) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_crtc_duplicate_state(crtc, state); + + return state; +} +EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state); + +/** + * __drm_atomic_helper_crtc_destroy_state - release CRTC state + * @state: CRTC state object to release + * + * Releases all resources stored in the CRTC state without actually freeing + * the memory of the CRTC state. This is useful for drivers that subclass the + * CRTC state. + */ +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) +{ + if (state->commit) { + /* + * In the event that a non-blocking commit returns + * -ERESTARTSYS before the commit_tail work is queued, we will + * have an extra reference to the commit object. Release it, if + * the event has not been consumed by the worker. + * + * state->event may be freed, so we can't directly look at + * state->event->base.completion. + */ + if (state->event && state->commit->abort_completion) + drm_crtc_commit_put(state->commit); + + kfree(state->commit->event); + state->commit->event = NULL; + + drm_crtc_commit_put(state->commit); + } + + drm_property_blob_put(state->mode_blob); + drm_property_blob_put(state->degamma_lut); + drm_property_blob_put(state->ctm); + drm_property_blob_put(state->gamma_lut); +} +EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state); + +/** + * drm_atomic_helper_crtc_destroy_state - default state destroy hook + * @crtc: drm CRTC + * @state: CRTC state object to release + * + * Default CRTC state destroy hook for drivers which don't have their own + * subclassed CRTC state structure. 
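+ *
+ * Drivers that do subclass the CRTC state call the double-underscore variant
+ * from their own hook instead (a sketch; &struct mydrv_crtc_state is
+ * illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	struct mydrv_crtc_state {
+ *		struct drm_crtc_state base;
+ *		u32 extra_hw_field;
+ *	};
+ *
+ *	static void mydrv_crtc_destroy_state(struct drm_crtc *crtc,
+ *					     struct drm_crtc_state *state)
+ *	{
+ *		__drm_atomic_helper_crtc_destroy_state(state);
+ *		kfree(container_of(state, struct mydrv_crtc_state, base));
+ *	}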
+ */ +void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + __drm_atomic_helper_crtc_destroy_state(state); + kfree(state); +} +EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state); + +/** + * __drm_atomic_helper_plane_reset - resets planes state to default values + * @plane: plane object, must not be NULL + * @state: atomic plane state, must not be NULL + * + * Initializes plane state to default. This is useful for drivers that subclass + * the plane state. + */ +void __drm_atomic_helper_plane_reset(struct drm_plane *plane, + struct drm_plane_state *state) +{ + state->plane = plane; + state->rotation = DRM_MODE_ROTATE_0; + + state->alpha = DRM_BLEND_ALPHA_OPAQUE; + state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI; + + plane->state = state; +} +EXPORT_SYMBOL(__drm_atomic_helper_plane_reset); + +/** + * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes + * @plane: drm plane + * + * Resets the atomic state for @plane by freeing the state pointer (which might + * be NULL, e.g. at driver load time) and allocating a new empty state object. + */ +void drm_atomic_helper_plane_reset(struct drm_plane *plane) +{ + if (plane->state) + __drm_atomic_helper_plane_destroy_state(plane->state); + + kfree(plane->state); + plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL); + if (plane->state) + __drm_atomic_helper_plane_reset(plane, plane->state); +} +EXPORT_SYMBOL(drm_atomic_helper_plane_reset); + +/** + * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state + * @plane: plane object + * @state: atomic plane state + * + * Copies atomic state from a plane's current state. This is useful for + * drivers that subclass the plane state. + */ +void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + memcpy(state, plane->state, sizeof(*state)); + + if (state->fb) + drm_framebuffer_get(state->fb); + + state->fence = NULL; + state->commit = NULL; + state->fb_damage_clips = NULL; +} +EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state); + +/** + * drm_atomic_helper_plane_duplicate_state - default state duplicate hook + * @plane: drm plane + * + * Default plane state duplicate hook for drivers which don't have their own + * subclassed plane state structure. + */ +struct drm_plane_state * +drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane) +{ + struct drm_plane_state *state; + + if (WARN_ON(!plane->state)) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_plane_duplicate_state(plane, state); + + return state; +} +EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state); + +/** + * __drm_atomic_helper_plane_destroy_state - release plane state + * @state: plane state object to release + * + * Releases all resources stored in the plane state without actually freeing + * the memory of the plane state. This is useful for drivers that subclass the + * plane state. 
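+ *
+ * Drivers without a subclassed plane state instead use the plain helpers from
+ * this file directly as their &drm_plane_funcs hooks (sketch; the
+ * &mydrv_plane_funcs name is illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	static const struct drm_plane_funcs mydrv_plane_funcs = {
+ *		.reset = drm_atomic_helper_plane_reset,
+ *		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ *		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ *	};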
+ */ +void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state) +{ + if (state->fb) + drm_framebuffer_put(state->fb); + + if (state->fence) + dma_fence_put(state->fence); + + if (state->commit) + drm_crtc_commit_put(state->commit); + + drm_property_blob_put(state->fb_damage_clips); +} +EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state); + +/** + * drm_atomic_helper_plane_destroy_state - default state destroy hook + * @plane: drm plane + * @state: plane state object to release + * + * Default plane state destroy hook for drivers which don't have their own + * subclassed plane state structure. + */ +void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + __drm_atomic_helper_plane_destroy_state(state); + kfree(state); +} +EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); + +/** + * __drm_atomic_helper_connector_reset - reset state on connector + * @connector: drm connector + * @conn_state: connector state to assign + * + * Initializes the newly allocated @conn_state and assigns it to + * the &drm_connector->state pointer of @connector, usually required when + * initializing the drivers or when called from the &drm_connector_funcs.reset + * hook. + * + * This is useful for drivers that subclass the connector state. + */ +void +__drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state) +{ + if (conn_state) + conn_state->connector = connector; + + connector->state = conn_state; +} +EXPORT_SYMBOL(__drm_atomic_helper_connector_reset); + +/** + * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors + * @connector: drm connector + * + * Resets the atomic state for @connector by freeing the state pointer (which + * might be NULL, e.g. at driver load time) and allocating a new empty state + * object. + */ +void drm_atomic_helper_connector_reset(struct drm_connector *connector) +{ + struct drm_connector_state *conn_state = + kzalloc(sizeof(*conn_state), GFP_KERNEL); + + if (connector->state) + __drm_atomic_helper_connector_destroy_state(connector->state); + + kfree(connector->state); + __drm_atomic_helper_connector_reset(connector, conn_state); +} +EXPORT_SYMBOL(drm_atomic_helper_connector_reset); + +/** + * drm_atomic_helper_connector_tv_reset - Resets TV connector properties + * @connector: DRM connector + * + * Resets the TV-related properties attached to a connector. + */ +void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector) +{ + struct drm_cmdline_mode *cmdline = &connector->cmdline_mode; + struct drm_connector_state *state = connector->state; + + state->tv.margins.left = cmdline->tv_margins.left; + state->tv.margins.right = cmdline->tv_margins.right; + state->tv.margins.top = cmdline->tv_margins.top; + state->tv.margins.bottom = cmdline->tv_margins.bottom; +} +EXPORT_SYMBOL(drm_atomic_helper_connector_tv_reset); + +/** + * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state + * @connector: connector object + * @state: atomic connector state + * + * Copies atomic state from a connector's current state. This is useful for + * drivers that subclass the connector state. 
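+ *
+ * A subclassed duplicate hook typically allocates the derived structure first
+ * and then lets this helper copy the base state (sketch; &struct
+ * mydrv_connector_state is illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	struct mydrv_connector_state *new_state;
+ *
+ *	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ *	if (!new_state)
+ *		return NULL;
+ *	__drm_atomic_helper_connector_duplicate_state(connector,
+ *						      &new_state->base);
+ *	return &new_state->base;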
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+					      struct drm_connector_state *state)
+{
+	memcpy(state, connector->state, sizeof(*state));
+	if (state->crtc)
+		drm_connector_get(connector);
+	state->commit = NULL;
+
+	if (state->hdr_output_metadata)
+		drm_property_blob_get(state->hdr_output_metadata);
+
+	/* Don't copy over a writeback job, they are used only once */
+	state->writeback_job = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
+ * @connector: drm connector
+ *
+ * Default connector state duplicate hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
+{
+	struct drm_connector_state *state;
+
+	if (WARN_ON(!connector->state))
+		return NULL;
+
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_connector_duplicate_state(connector, state);
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
+{
+	if (state->crtc)
+		drm_connector_put(state->connector);
+
+	if (state->commit)
+		drm_crtc_commit_put(state->commit);
+
+	if (state->writeback_job)
+		drm_writeback_cleanup_job(state->writeback_job);
+
+	drm_property_blob_put(state->hdr_output_metadata);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+/**
+ * drm_atomic_helper_connector_destroy_state - default state destroy hook
+ * @connector: drm connector
+ * @state: connector state object to release
+ *
+ * Default connector state destroy hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+					       struct drm_connector_state *state)
+{
+	__drm_atomic_helper_connector_destroy_state(state);
+	kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+
+/**
+ * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
+ * @obj: private object
+ * @state: new private object state
+ *
+ * Copies atomic state from a private object's current state and resets inferred values.
+ * This is useful for drivers that subclass the private state.
+ */
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+						     struct drm_private_state *state)
+{
+	memcpy(state, obj->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
Index: sys/dev/drm/core/drm_atomic_uapi.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_atomic_uapi.c
@@ -0,0 +1,1430 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark
+ * Daniel Vetter
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * This file contains the marshalling and demarshalling glue for the atomic UAPI
+ * in all its forms: The monster ATOMIC IOCTL itself, code for GET_PROPERTY and
+ * SET_PROPERTY IOCTLs. Plus interface functions for compatibility helpers and
+ * drivers which have special needs to construct their own atomic updates, e.g.
+ * for load detect or similar.
+ */
+
+/**
+ * drm_atomic_set_mode_for_crtc - set mode for CRTC
+ * @state: the CRTC whose incoming state to update
+ * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
+ *
+ * Set a mode (originating from the kernel) on the desired CRTC state and update
+ * the enable property.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure. Cannot return -EDEADLK.
+ */
+int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
+				 const struct drm_display_mode *mode)
+{
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_mode_modeinfo umode;
+
+	/* Early return for no change. */
+	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
+		return 0;
+
+	drm_property_blob_put(state->mode_blob);
+	state->mode_blob = NULL;
+
+	if (mode) {
+		drm_mode_convert_to_umode(&umode, mode);
+		state->mode_blob =
+			drm_property_create_blob(state->crtc->dev,
+						 sizeof(umode),
+						 &umode);
+		if (IS_ERR(state->mode_blob))
+			return PTR_ERR(state->mode_blob);
+
+		drm_mode_copy(&state->mode, mode);
+		state->enable = true;
+		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+				 mode->name, crtc->base.id, crtc->name, state);
+	} else {
+		memset(&state->mode, 0, sizeof(state->mode));
+		state->enable = false;
+		DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+				 crtc->base.id, crtc->name, state);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
+
+/**
+ * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
+ * @state: the CRTC whose incoming state to update
+ * @blob: pointer to blob property to use for mode
+ *
+ * Set a mode (originating from a blob property) on the desired CRTC state.
+ * This function will take a reference on the blob property for the CRTC state,
+ * and release the reference held on the state's existing mode property, if any
+ * was set.
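+ *
+ * A caller that builds the blob itself stays responsible for its own
+ * reference (a sketch; @umode is a &struct drm_mode_modeinfo filled in by
+ * drm_mode_convert_to_umode()):
+ *
+ * .. code-block:: c
+ *
+ *	blob = drm_property_create_blob(dev, sizeof(umode), &umode);
+ *	if (IS_ERR(blob))
+ *		return PTR_ERR(blob);
+ *	ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, blob);
+ *	drm_property_blob_put(blob);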
+ * + * RETURNS: + * Zero on success, error code on failure. Cannot return -EDEADLK. + */ +int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, + struct drm_property_blob *blob) +{ + struct drm_crtc *crtc = state->crtc; + + if (blob == state->mode_blob) + return 0; + + drm_property_blob_put(state->mode_blob); + state->mode_blob = NULL; + + memset(&state->mode, 0, sizeof(state->mode)); + + if (blob) { + int ret; + + if (blob->length != sizeof(struct drm_mode_modeinfo)) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n", + crtc->base.id, crtc->name, + blob->length); + return -EINVAL; + } + + ret = drm_mode_convert_umode(crtc->dev, + &state->mode, blob->data); + if (ret) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n", + crtc->base.id, crtc->name, + ret, drm_get_mode_status_name(state->mode.status)); + drm_mode_debug_printmodeline(&state->mode); + return -EINVAL; + } + + state->mode_blob = drm_property_blob_get(blob); + state->enable = true; + DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", + state->mode.name, crtc->base.id, crtc->name, + state); + } else { + state->enable = false; + DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", + crtc->base.id, crtc->name, state); + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); + +/** + * drm_atomic_set_crtc_for_plane - set crtc for plane + * @plane_state: the plane whose incoming state to update + * @crtc: crtc to use for the plane + * + * Changing the assigned crtc for a plane requires us to grab the lock and state + * for the new crtc, as needed. This function takes care of all these details + * besides updating the pointer in the state object itself. + * + * Returns: + * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK + * then the w/w mutex code has detected a deadlock and the entire atomic + * sequence must be restarted. All other errors are fatal. + */ +int +drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + struct drm_crtc *crtc) +{ + struct drm_plane *plane = plane_state->plane; + struct drm_crtc_state *crtc_state; + /* Nothing to do for same crtc*/ + if (plane_state->crtc == crtc) + return 0; + if (plane_state->crtc) { + crtc_state = drm_atomic_get_crtc_state(plane_state->state, + plane_state->crtc); + if (WARN_ON(IS_ERR(crtc_state))) + return PTR_ERR(crtc_state); + + crtc_state->plane_mask &= ~drm_plane_mask(plane); + } + + plane_state->crtc = crtc; + + if (crtc) { + crtc_state = drm_atomic_get_crtc_state(plane_state->state, + crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + crtc_state->plane_mask |= drm_plane_mask(plane); + } + + if (crtc) + DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n", + plane->base.id, plane->name, plane_state, + crtc->base.id, crtc->name); + else + DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n", + plane->base.id, plane->name, plane_state); + + return 0; +} +EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); + +/** + * drm_atomic_set_fb_for_plane - set framebuffer for plane + * @plane_state: atomic state object for the plane + * @fb: fb to use for the plane + * + * Changing the assigned framebuffer for a plane requires us to grab a reference + * to the new fb and drop the reference to the old fb, if there is one. This + * function takes care of all these details besides updating the pointer in the + * state object itself. 
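+ *
+ * In practice this is nearly always paired with drm_atomic_set_crtc_for_plane(),
+ * as in the legacy page-flip and plane-update compatibility paths (sketch):
+ *
+ * .. code-block:: c
+ *
+ *	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ *	if (ret != 0)
+ *		return ret;
+ *	drm_atomic_set_fb_for_plane(plane_state, fb);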
+ */
+void
+drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+			    struct drm_framebuffer *fb)
+{
+	struct drm_plane *plane = plane_state->plane;
+
+	if (fb)
+		DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
+				 fb->base.id, plane->base.id, plane->name,
+				 plane_state);
+	else
+		DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
+				 plane->base.id, plane->name, plane_state);
+
+	drm_framebuffer_assign(&plane_state->fb, fb);
+}
+EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
+
+/**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: dma_fence to use for the plane
+ *
+ * Helper to set up the plane_state fence in case it is not set yet.
+ * With this, drivers don't need to worry whether the user chose implicit
+ * or explicit fencing.
+ *
+ * This function will not set the fence to the state if it was set
+ * via explicit fencing interfaces on the atomic ioctl. In that case it will
+ * drop the reference to the fence as we are not storing it anywhere.
+ * Otherwise, if &drm_plane_state.fence is not set, this function just sets it
+ * to the received implicit fence. In both cases this function consumes a
+ * reference for @fence.
+ *
+ * This way explicit fencing can be used to overrule implicit fencing, which is
+ * important to make explicit fencing use-cases work: One example is using one
+ * buffer for 2 screens with different refresh rates. Implicit fencing will
+ * clamp rendering to the refresh rate of the slower screen, whereas an explicit
+ * fence allows 2 independent render and display loops on a single buffer. If a
+ * driver obeys both implicit and explicit fences for plane updates, then it
+ * will break all the benefits of explicit fencing.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+			       struct dma_fence *fence)
+{
+	if (plane_state->fence) {
+		dma_fence_put(fence);
+		return;
+	}
+
+	plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
+
+/**
+ * drm_atomic_set_crtc_for_connector - set crtc for connector
+ * @conn_state: atomic state object for the connector
+ * @crtc: crtc to use for the connector
+ *
+ * Changing the assigned crtc for a connector requires us to grab the lock and
+ * state for the new crtc, as needed. This function takes care of all these
+ * details besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
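+ *
+ * An -EDEADLK return feeds into the caller's usual w/w backoff loop, the same
+ * dance drm_mode_atomic_ioctl() below goes through (sketch):
+ *
+ * .. code-block:: c
+ *
+ * retry:
+ *	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ *	if (ret == -EDEADLK) {
+ *		drm_atomic_state_clear(state);
+ *		if (drm_modeset_backoff(&ctx) == 0)
+ *			goto retry;
+ *	}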
+ */ +int +drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, + struct drm_crtc *crtc) +{ + struct drm_connector *connector = conn_state->connector; + struct drm_crtc_state *crtc_state; + + if (conn_state->crtc == crtc) + return 0; + + if (conn_state->crtc) { + crtc_state = drm_atomic_get_new_crtc_state(conn_state->state, + conn_state->crtc); + + crtc_state->connector_mask &= + ~drm_connector_mask(conn_state->connector); + + drm_connector_put(conn_state->connector); + conn_state->crtc = NULL; + } + + if (crtc) { + crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + crtc_state->connector_mask |= + drm_connector_mask(conn_state->connector); + + drm_connector_get(conn_state->connector); + conn_state->crtc = crtc; + + DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n", + connector->base.id, connector->name, + conn_state, crtc->base.id, crtc->name); + } else { + DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n", + connector->base.id, connector->name, + conn_state); + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); + +static void set_out_fence_for_crtc(struct drm_atomic_state *state, + struct drm_crtc *crtc, s32 __user *fence_ptr) +{ + state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; +} + +static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + s32 __user *fence_ptr; + + fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; + state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; + + return fence_ptr; +} + +static int set_out_fence_for_connector(struct drm_atomic_state *state, + struct drm_connector *connector, + s32 __user *fence_ptr) +{ + unsigned int index = drm_connector_index(connector); + + if (!fence_ptr) + return 0; + + if (put_user(-1, fence_ptr)) + return -EFAULT; + + state->connectors[index].out_fence_ptr = fence_ptr; + + return 0; +} + +static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + unsigned int index = drm_connector_index(connector); + s32 __user *fence_ptr; + + fence_ptr = state->connectors[index].out_fence_ptr; + state->connectors[index].out_fence_ptr = NULL; + + return fence_ptr; +} + +static int +drm_atomic_replace_property_blob_from_id(struct drm_device *dev, + struct drm_property_blob **blob, + uint64_t blob_id, + ssize_t expected_size, + ssize_t expected_elem_size, + bool *replaced) +{ + struct drm_property_blob *new_blob = NULL; + + if (blob_id != 0) { + new_blob = drm_property_lookup_blob(dev, blob_id); + if (new_blob == NULL) + return -EINVAL; + + if (expected_size > 0 && + new_blob->length != expected_size) { + drm_property_blob_put(new_blob); + return -EINVAL; + } + if (expected_elem_size > 0 && + new_blob->length % expected_elem_size != 0) { + drm_property_blob_put(new_blob); + return -EINVAL; + } + } + + *replaced |= drm_property_replace_blob(blob, new_blob); + drm_property_blob_put(new_blob); + + return 0; +} + +static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, + struct drm_crtc_state *state, struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + bool replaced = false; + int ret; + + if (property == config->prop_active) + state->active = val; + else if (property == config->prop_mode_id) { + struct drm_property_blob *mode = + drm_property_lookup_blob(dev, val); + ret = 
drm_atomic_set_mode_prop_for_crtc(state, mode); + drm_property_blob_put(mode); + return ret; + } else if (property == config->prop_vrr_enabled) { + state->vrr_enabled = val; + } else if (property == config->degamma_lut_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->degamma_lut, + val, + -1, sizeof(struct drm_color_lut), + &replaced); + state->color_mgmt_changed |= replaced; + return ret; + } else if (property == config->ctm_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->ctm, + val, + sizeof(struct drm_color_ctm), -1, + &replaced); + state->color_mgmt_changed |= replaced; + return ret; + } else if (property == config->gamma_lut_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->gamma_lut, + val, + -1, sizeof(struct drm_color_lut), + &replaced); + state->color_mgmt_changed |= replaced; + return ret; + } else if (property == config->prop_out_fence_ptr) { + s32 __user *fence_ptr = u64_to_user_ptr(val); + + if (!fence_ptr) + return 0; + + if (put_user(-1, fence_ptr)) + return -EFAULT; + + set_out_fence_for_crtc(state->state, crtc, fence_ptr); + } else if (crtc->funcs->atomic_set_property) { + return crtc->funcs->atomic_set_property(crtc, state, property, val); + } else { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n", + crtc->base.id, crtc->name, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +drm_atomic_crtc_get_property(struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, uint64_t *val) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (property == config->prop_active) + *val = drm_atomic_crtc_effectively_active(state); + else if (property == config->prop_mode_id) + *val = (state->mode_blob) ? state->mode_blob->base.id : 0; + else if (property == config->prop_vrr_enabled) + *val = state->vrr_enabled; + else if (property == config->degamma_lut_property) + *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; + else if (property == config->ctm_property) + *val = (state->ctm) ? state->ctm->base.id : 0; + else if (property == config->gamma_lut_property) + *val = (state->gamma_lut) ? 
state->gamma_lut->base.id : 0; + else if (property == config->prop_out_fence_ptr) + *val = 0; + else if (crtc->funcs->atomic_get_property) + return crtc->funcs->atomic_get_property(crtc, state, property, val); + else + return -EINVAL; + + return 0; +} + +static int drm_atomic_plane_set_property(struct drm_plane *plane, + struct drm_plane_state *state, struct drm_file *file_priv, + struct drm_property *property, uint64_t val) +{ + struct drm_device *dev = plane->dev; + struct drm_mode_config *config = &dev->mode_config; + bool replaced = false; + int ret; + + if (property == config->prop_fb_id) { + struct drm_framebuffer *fb; + fb = drm_framebuffer_lookup(dev, file_priv, val); + drm_atomic_set_fb_for_plane(state, fb); + if (fb) + drm_framebuffer_put(fb); + } else if (property == config->prop_in_fence_fd) { + if (state->fence) + return -EINVAL; + + if (U642I64(val) == -1) + return 0; + + state->fence = sync_file_get_fence(val); + if (!state->fence) + return -EINVAL; + + } else if (property == config->prop_crtc_id) { + struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val); + if (val && !crtc) + return -EACCES; + return drm_atomic_set_crtc_for_plane(state, crtc); + } else if (property == config->prop_crtc_x) { + state->crtc_x = U642I64(val); + } else if (property == config->prop_crtc_y) { + state->crtc_y = U642I64(val); + } else if (property == config->prop_crtc_w) { + state->crtc_w = val; + } else if (property == config->prop_crtc_h) { + state->crtc_h = val; + } else if (property == config->prop_src_x) { + state->src_x = val; + } else if (property == config->prop_src_y) { + state->src_y = val; + } else if (property == config->prop_src_w) { + state->src_w = val; + } else if (property == config->prop_src_h) { + state->src_h = val; + } else if (property == plane->alpha_property) { + state->alpha = val; + } else if (property == plane->blend_mode_property) { + state->pixel_blend_mode = val; + } else if (property == plane->rotation_property) { + if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n", + plane->base.id, plane->name, val); + return -EINVAL; + } + state->rotation = val; + } else if (property == plane->zpos_property) { + state->zpos = val; + } else if (property == plane->color_encoding_property) { + state->color_encoding = val; + } else if (property == plane->color_range_property) { + state->color_range = val; + } else if (property == config->prop_fb_damage_clips) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->fb_damage_clips, + val, + -1, + sizeof(struct drm_rect), + &replaced); + return ret; + } else if (plane->funcs->atomic_set_property) { + return plane->funcs->atomic_set_property(plane, state, + property, val); + } else { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", + plane->base.id, plane->name, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +drm_atomic_plane_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, uint64_t *val) +{ + struct drm_device *dev = plane->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (property == config->prop_fb_id) { + *val = (state->fb) ? state->fb->base.id : 0; + } else if (property == config->prop_in_fence_fd) { + *val = -1; + } else if (property == config->prop_crtc_id) { + *val = (state->crtc) ? 
state->crtc->base.id : 0; + } else if (property == config->prop_crtc_x) { + *val = I642U64(state->crtc_x); + } else if (property == config->prop_crtc_y) { + *val = I642U64(state->crtc_y); + } else if (property == config->prop_crtc_w) { + *val = state->crtc_w; + } else if (property == config->prop_crtc_h) { + *val = state->crtc_h; + } else if (property == config->prop_src_x) { + *val = state->src_x; + } else if (property == config->prop_src_y) { + *val = state->src_y; + } else if (property == config->prop_src_w) { + *val = state->src_w; + } else if (property == config->prop_src_h) { + *val = state->src_h; + } else if (property == plane->alpha_property) { + *val = state->alpha; + } else if (property == plane->blend_mode_property) { + *val = state->pixel_blend_mode; + } else if (property == plane->rotation_property) { + *val = state->rotation; + } else if (property == plane->zpos_property) { + *val = state->zpos; + } else if (property == plane->color_encoding_property) { + *val = state->color_encoding; + } else if (property == plane->color_range_property) { + *val = state->color_range; + } else if (property == config->prop_fb_damage_clips) { + *val = (state->fb_damage_clips) ? + state->fb_damage_clips->base.id : 0; + } else if (plane->funcs->atomic_get_property) { + return plane->funcs->atomic_get_property(plane, state, property, val); + } else { + return -EINVAL; + } + + return 0; +} + +static int drm_atomic_set_writeback_fb_for_connector( + struct drm_connector_state *conn_state, + struct drm_framebuffer *fb) +{ + int ret; + + ret = drm_writeback_set_fb(conn_state, fb); + if (ret < 0) + return ret; + + if (fb) + DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n", + fb->base.id, conn_state); + else + DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n", + conn_state); + + return 0; +} + +static int drm_atomic_connector_set_property(struct drm_connector *connector, + struct drm_connector_state *state, struct drm_file *file_priv, + struct drm_property *property, uint64_t val) +{ + struct drm_device *dev = connector->dev; + struct drm_mode_config *config = &dev->mode_config; + bool replaced = false; + int ret; + + if (property == config->prop_crtc_id) { + struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val); + if (val && !crtc) + return -EACCES; + return drm_atomic_set_crtc_for_connector(state, crtc); + } else if (property == config->dpms_property) { + /* setting DPMS property requires special handling, which + * is done in legacy setprop path for us. Disallow (for + * now?) 
atomic writes to DPMS property: + */ + return -EINVAL; + } else if (property == config->tv_select_subconnector_property) { + state->tv.subconnector = val; + } else if (property == config->tv_left_margin_property) { + state->tv.margins.left = val; + } else if (property == config->tv_right_margin_property) { + state->tv.margins.right = val; + } else if (property == config->tv_top_margin_property) { + state->tv.margins.top = val; + } else if (property == config->tv_bottom_margin_property) { + state->tv.margins.bottom = val; + } else if (property == config->tv_mode_property) { + state->tv.mode = val; + } else if (property == config->tv_brightness_property) { + state->tv.brightness = val; + } else if (property == config->tv_contrast_property) { + state->tv.contrast = val; + } else if (property == config->tv_flicker_reduction_property) { + state->tv.flicker_reduction = val; + } else if (property == config->tv_overscan_property) { + state->tv.overscan = val; + } else if (property == config->tv_saturation_property) { + state->tv.saturation = val; + } else if (property == config->tv_hue_property) { + state->tv.hue = val; + } else if (property == config->link_status_property) { + /* Never downgrade from GOOD to BAD on userspace's request here, + * only hw issues can do that. + * + * For an atomic property the userspace doesn't need to be able + * to understand all the properties, but needs to be able to + * restore the state it wants on VT switch. So if the userspace + * tries to change the link_status from GOOD to BAD, driver + * silently rejects it and returns a 0. This prevents userspace + * from accidently breaking the display when it restores the + * state. + */ + if (state->link_status != DRM_LINK_STATUS_GOOD) + state->link_status = val; + } else if (property == config->hdr_output_metadata_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->hdr_output_metadata, + val, + sizeof(struct hdr_output_metadata), -1, + &replaced); + return ret; + } else if (property == config->aspect_ratio_property) { + state->picture_aspect_ratio = val; + } else if (property == config->content_type_property) { + state->content_type = val; + } else if (property == connector->scaling_mode_property) { + state->scaling_mode = val; + } else if (property == config->content_protection_property) { + if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); + return -EINVAL; + } + state->content_protection = val; + } else if (property == config->hdcp_content_type_property) { + state->hdcp_content_type = val; + } else if (property == connector->colorspace_property) { + state->colorspace = val; + } else if (property == config->writeback_fb_id_property) { + struct drm_framebuffer *fb; + int ret; + fb = drm_framebuffer_lookup(dev, file_priv, val); + ret = drm_atomic_set_writeback_fb_for_connector(state, fb); + if (fb) + drm_framebuffer_put(fb); + return ret; + } else if (property == config->writeback_out_fence_ptr_property) { + s32 __user *fence_ptr = u64_to_user_ptr(val); + + return set_out_fence_for_connector(state->state, connector, + fence_ptr); + } else if (property == connector->max_bpc_property) { + state->max_requested_bpc = val; + } else if (connector->funcs->atomic_set_property) { + return connector->funcs->atomic_set_property(connector, + state, property, val); + } else { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n", + connector->base.id, connector->name, + property->base.id, property->name); + return -EINVAL; + } + + 
return 0; +} + +static int +drm_atomic_connector_get_property(struct drm_connector *connector, + const struct drm_connector_state *state, + struct drm_property *property, uint64_t *val) +{ + struct drm_device *dev = connector->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (property == config->prop_crtc_id) { + *val = (state->crtc) ? state->crtc->base.id : 0; + } else if (property == config->dpms_property) { + if (state->crtc && state->crtc->state->self_refresh_active) + *val = DRM_MODE_DPMS_ON; + else + *val = connector->dpms; + } else if (property == config->tv_select_subconnector_property) { + *val = state->tv.subconnector; + } else if (property == config->tv_left_margin_property) { + *val = state->tv.margins.left; + } else if (property == config->tv_right_margin_property) { + *val = state->tv.margins.right; + } else if (property == config->tv_top_margin_property) { + *val = state->tv.margins.top; + } else if (property == config->tv_bottom_margin_property) { + *val = state->tv.margins.bottom; + } else if (property == config->tv_mode_property) { + *val = state->tv.mode; + } else if (property == config->tv_brightness_property) { + *val = state->tv.brightness; + } else if (property == config->tv_contrast_property) { + *val = state->tv.contrast; + } else if (property == config->tv_flicker_reduction_property) { + *val = state->tv.flicker_reduction; + } else if (property == config->tv_overscan_property) { + *val = state->tv.overscan; + } else if (property == config->tv_saturation_property) { + *val = state->tv.saturation; + } else if (property == config->tv_hue_property) { + *val = state->tv.hue; + } else if (property == config->link_status_property) { + *val = state->link_status; + } else if (property == config->aspect_ratio_property) { + *val = state->picture_aspect_ratio; + } else if (property == config->content_type_property) { + *val = state->content_type; + } else if (property == connector->colorspace_property) { + *val = state->colorspace; + } else if (property == connector->scaling_mode_property) { + *val = state->scaling_mode; + } else if (property == config->hdr_output_metadata_property) { + *val = state->hdr_output_metadata ? 
+ state->hdr_output_metadata->base.id : 0; + } else if (property == config->content_protection_property) { + *val = state->content_protection; + } else if (property == config->hdcp_content_type_property) { + *val = state->hdcp_content_type; + } else if (property == config->writeback_fb_id_property) { + /* Writeback framebuffer is one-shot, write and forget */ + *val = 0; + } else if (property == config->writeback_out_fence_ptr_property) { + *val = 0; + } else if (property == connector->max_bpc_property) { + *val = state->max_requested_bpc; + } else if (connector->funcs->atomic_get_property) { + return connector->funcs->atomic_get_property(connector, + state, property, val); + } else { + return -EINVAL; + } + + return 0; +} + +int drm_atomic_get_property(struct drm_mode_object *obj, + struct drm_property *property, uint64_t *val) +{ + struct drm_device *dev = property->dev; + int ret; + + switch (obj->type) { + case DRM_MODE_OBJECT_CONNECTOR: { + struct drm_connector *connector = obj_to_connector(obj); + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + ret = drm_atomic_connector_get_property(connector, + connector->state, property, val); + break; + } + case DRM_MODE_OBJECT_CRTC: { + struct drm_crtc *crtc = obj_to_crtc(obj); + WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); + ret = drm_atomic_crtc_get_property(crtc, + crtc->state, property, val); + break; + } + case DRM_MODE_OBJECT_PLANE: { + struct drm_plane *plane = obj_to_plane(obj); + WARN_ON(!drm_modeset_is_locked(&plane->mutex)); + ret = drm_atomic_plane_get_property(plane, + plane->state, property, val); + break; + } + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* + * The big monster ioctl + */ + +static struct drm_pending_vblank_event *create_vblank_event( + struct drm_crtc *crtc, uint64_t user_data) +{ + struct drm_pending_vblank_event *e = NULL; + + e = kzalloc(sizeof *e, GFP_KERNEL); + if (!e) + return NULL; + + e->event.base.type = DRM_EVENT_FLIP_COMPLETE; + e->event.base.length = sizeof(e->event); + e->event.vbl.crtc_id = crtc->base.id; + e->event.vbl.user_data = user_data; + + return e; +} + +int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, + struct drm_connector *connector, + int mode) +{ + struct drm_connector *tmp_connector; + struct drm_connector_state *new_conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i, ret, old_mode = connector->dpms; + bool active = false; + + ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, + state->acquire_ctx); + if (ret) + return ret; + + if (mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + connector->dpms = mode; + + crtc = connector->state->crtc; + if (!crtc) + goto out; + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret) + goto out; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto out; + } + + for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { + if (new_conn_state->crtc != crtc) + continue; + if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { + active = true; + break; + } + } + + crtc_state->active = active; + ret = drm_atomic_commit(state); +out: + if (ret != 0) + connector->dpms = old_mode; + return ret; +} + +int drm_atomic_set_property(struct drm_atomic_state *state, + struct drm_file *file_priv, + struct drm_mode_object *obj, + struct drm_property *prop, + uint64_t prop_value) +{ + struct drm_mode_object *ref; + int ret; + + if (!drm_property_change_valid_get(prop, 
prop_value, &ref))
+		return -EINVAL;
+
+	switch (obj->type) {
+	case DRM_MODE_OBJECT_CONNECTOR: {
+		struct drm_connector *connector = obj_to_connector(obj);
+		struct drm_connector_state *connector_state;
+
+		connector_state = drm_atomic_get_connector_state(state, connector);
+		if (IS_ERR(connector_state)) {
+			ret = PTR_ERR(connector_state);
+			break;
+		}
+
+		ret = drm_atomic_connector_set_property(connector,
+				connector_state, file_priv,
+				prop, prop_value);
+		break;
+	}
+	case DRM_MODE_OBJECT_CRTC: {
+		struct drm_crtc *crtc = obj_to_crtc(obj);
+		struct drm_crtc_state *crtc_state;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			ret = PTR_ERR(crtc_state);
+			break;
+		}
+
+		ret = drm_atomic_crtc_set_property(crtc,
+				crtc_state, prop, prop_value);
+		break;
+	}
+	case DRM_MODE_OBJECT_PLANE: {
+		struct drm_plane *plane = obj_to_plane(obj);
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			break;
+		}
+
+		ret = drm_atomic_plane_set_property(plane,
+				plane_state, file_priv,
+				prop, prop_value);
+		break;
+	}
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	drm_property_change_valid_put(prop, ref);
+	return ret;
+}
+
+/**
+ * DOC: explicit fencing properties
+ *
+ * Explicit fencing allows userspace to control the buffer synchronization
+ * between devices. A fence or a group of fences is transferred to/from
+ * userspace using Sync File fds, and there are two DRM properties for that:
+ * IN_FENCE_FD on each DRM Plane to send fences to the kernel, and
+ * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
+ *
+ * As a contrast, with implicit fencing the kernel keeps track of any
+ * ongoing rendering, and automatically ensures that the atomic update waits
+ * for any pending rendering to complete. For shared buffers represented with
+ * a &struct dma_buf this is tracked in &struct reservation_object.
+ * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
+ * whereas explicit fencing is what Android wants.
+ *
+ * "IN_FENCE_FD":
+ *	Use this property to pass a fence that DRM should wait on before
+ *	proceeding with the Atomic Commit request and show the framebuffer for
+ *	the plane on the screen. The fence can be either a normal fence or a
+ *	merged one, the sync_file framework will handle both cases and use a
+ *	fence_array if a merged fence is received. Passing -1 here means no
+ *	fences to wait on.
+ *
+ *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
+ *	it will only check if the Sync File is a valid one.
+ *
+ *	On the driver side the fence is stored on the @fence parameter of
+ *	&struct drm_plane_state. Drivers which also support implicit fencing
+ *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
+ *	to make sure there's consistent behaviour between drivers in precedence
+ *	of implicit vs. explicit fencing.
+ *
+ * "OUT_FENCE_PTR":
+ *	Use this property to pass a file descriptor pointer to DRM. Once the
+ *	Atomic Commit request call returns OUT_FENCE_PTR will be filled with
+ *	the file descriptor number of a Sync File. This Sync File contains the
+ *	CRTC fence that will be signaled when all framebuffers present on the
+ *	Atomic Commit request for that given CRTC are scanned out on the
+ *	screen.
+ *
+ *	The Atomic Commit request fails if an invalid pointer is passed.
If the + * Atomic Commit request fails for any other reason the out fence fd + * returned will be -1. On a Atomic Commit with the + * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1. + * + * Note that out-fences don't have a special interface to drivers and are + * internally represented by a &struct drm_pending_vblank_event in struct + * &drm_crtc_state, which is also used by the nonblocking atomic commit + * helpers and for the DRM event handling for existing userspace. + */ + +struct drm_out_fence_state { + s32 __user *out_fence_ptr; + struct sync_file *sync_file; + int fd; +}; + +static int setup_out_fence(struct drm_out_fence_state *fence_state, + struct dma_fence *fence) +{ + fence_state->fd = get_unused_fd_flags(O_CLOEXEC); + if (fence_state->fd < 0) + return fence_state->fd; + + if (put_user(fence_state->fd, fence_state->out_fence_ptr)) + return -EFAULT; + + fence_state->sync_file = sync_file_create(fence); + if (!fence_state->sync_file) + return -ENOMEM; + + return 0; +} + +static int prepare_signaling(struct drm_device *dev, + struct drm_atomic_state *state, + struct drm_mode_atomic *arg, + struct drm_file *file_priv, + struct drm_out_fence_state **fence_state, + unsigned int *num_fences) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_connector *conn; + struct drm_connector_state *conn_state; + int i, c = 0, ret; + + if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) + return 0; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + s32 __user *fence_ptr; + + fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); + + if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { + struct drm_pending_vblank_event *e; + + e = create_vblank_event(crtc, arg->user_data); + if (!e) + return -ENOMEM; + + crtc_state->event = e; + } + + if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { + struct drm_pending_vblank_event *e = crtc_state->event; + + if (!file_priv) + continue; + + ret = drm_event_reserve_init(dev, file_priv, &e->base, + &e->event.base); + if (ret) { + kfree(e); + crtc_state->event = NULL; + return ret; + } + } + + if (fence_ptr) { + struct dma_fence *fence; + struct drm_out_fence_state *f; + + f = krealloc(*fence_state, sizeof(**fence_state) * + (*num_fences + 1), GFP_KERNEL); + if (!f) + return -ENOMEM; + + memset(&f[*num_fences], 0, sizeof(*f)); + + f[*num_fences].out_fence_ptr = fence_ptr; + *fence_state = f; + + fence = drm_crtc_create_fence(crtc); + if (!fence) + return -ENOMEM; + + ret = setup_out_fence(&f[(*num_fences)++], fence); + if (ret) { + dma_fence_put(fence); + return ret; + } + + crtc_state->event->base.fence = fence; + } + + c++; + } + + for_each_new_connector_in_state(state, conn, conn_state, i) { + struct drm_writeback_connector *wb_conn; + struct drm_out_fence_state *f; + struct dma_fence *fence; + s32 __user *fence_ptr; + + if (!conn_state->writeback_job) + continue; + + fence_ptr = get_out_fence_for_connector(state, conn); + if (!fence_ptr) + continue; + + f = krealloc(*fence_state, sizeof(**fence_state) * + (*num_fences + 1), GFP_KERNEL); + if (!f) + return -ENOMEM; + + memset(&f[*num_fences], 0, sizeof(*f)); + + f[*num_fences].out_fence_ptr = fence_ptr; + *fence_state = f; + + wb_conn = drm_connector_to_writeback(conn); + fence = drm_writeback_get_out_fence(wb_conn); + if (!fence) + return -ENOMEM; + + ret = setup_out_fence(&f[(*num_fences)++], fence); + if (ret) { + dma_fence_put(fence); + return ret; + } + + conn_state->writeback_job->out_fence = fence; + } + + /* + * Having this flag means user mode 
pends on event which will never + * reach due to lack of at least one CRTC for signaling + */ + if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) + return -EINVAL; + + return 0; +} + +static void complete_signaling(struct drm_device *dev, + struct drm_atomic_state *state, + struct drm_out_fence_state *fence_state, + unsigned int num_fences, + bool install_fds) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i; + + if (install_fds) { + for (i = 0; i < num_fences; i++) + fd_install(fence_state[i].fd, + fence_state[i].sync_file->file); + + kfree(fence_state); + return; + } + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct drm_pending_vblank_event *event = crtc_state->event; + /* + * Free the allocated event. drm_atomic_helper_setup_commit + * can allocate an event too, so only free it if it's ours + * to prevent a double free in drm_atomic_state_clear. + */ + if (event && (event->base.fence || event->base.file_priv)) { + drm_event_cancel_free(dev, &event->base); + crtc_state->event = NULL; + } + } + + if (!fence_state) + return; + + for (i = 0; i < num_fences; i++) { + if (fence_state[i].sync_file) + fput(fence_state[i].sync_file->file); + if (fence_state[i].fd >= 0) + put_unused_fd(fence_state[i].fd); + + /* If this fails log error to the user */ + if (fence_state[i].out_fence_ptr && + put_user(-1, fence_state[i].out_fence_ptr)) + DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); + } + + kfree(fence_state); +} + +int drm_mode_atomic_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_atomic *arg = data; + uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); + uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); + uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); + uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); + unsigned int copied_objs, copied_props; + struct drm_atomic_state *state; + struct drm_modeset_acquire_ctx ctx; + struct drm_out_fence_state *fence_state; + int ret = 0; + unsigned int i, j, num_fences; + + /* disallow for drivers not supporting atomic: */ + if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) + return -EOPNOTSUPP; + + /* disallow for userspace that has not enabled atomic cap (even + * though this may be a bit overkill, since legacy userspace + * wouldn't know how to call this ioctl) + */ + if (!file_priv->atomic) + return -EINVAL; + + if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) + return -EINVAL; + + if (arg->reserved) + return -EINVAL; + + if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) + return -EINVAL; + + /* can't test and expect an event at the same time. 
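	 * A TEST_ONLY commit never programs the hardware, so the requested
	 * completion event could never be delivered.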
*/ + if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && + (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) + return -EINVAL; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + state->acquire_ctx = &ctx; + state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); + +retry: + copied_objs = 0; + copied_props = 0; + fence_state = NULL; + num_fences = 0; + + for (i = 0; i < arg->count_objs; i++) { + uint32_t obj_id, count_props; + struct drm_mode_object *obj; + + if (get_user(obj_id, objs_ptr + copied_objs)) { + ret = -EFAULT; + goto out; + } + + obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); + if (!obj) { + ret = -ENOENT; + goto out; + } + + if (!obj->properties) { + drm_mode_object_put(obj); + ret = -ENOENT; + goto out; + } + + if (get_user(count_props, count_props_ptr + copied_objs)) { + drm_mode_object_put(obj); + ret = -EFAULT; + goto out; + } + + copied_objs++; + + for (j = 0; j < count_props; j++) { + uint32_t prop_id; + uint64_t prop_value; + struct drm_property *prop; + + if (get_user(prop_id, props_ptr + copied_props)) { + drm_mode_object_put(obj); + ret = -EFAULT; + goto out; + } + + prop = drm_mode_obj_find_prop_id(obj, prop_id); + if (!prop) { + drm_mode_object_put(obj); + ret = -ENOENT; + goto out; + } + + if (copy_from_user(&prop_value, + prop_values_ptr + copied_props, + sizeof(prop_value))) { + drm_mode_object_put(obj); + ret = -EFAULT; + goto out; + } + + ret = drm_atomic_set_property(state, file_priv, + obj, prop, prop_value); + if (ret) { + drm_mode_object_put(obj); + goto out; + } + + copied_props++; + } + + drm_mode_object_put(obj); + } + + ret = prepare_signaling(dev, state, arg, file_priv, &fence_state, + &num_fences); + if (ret) + goto out; + + if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { + ret = drm_atomic_check_only(state); + } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { + ret = drm_atomic_nonblocking_commit(state); + } else { + if (unlikely(drm_debug & DRM_UT_STATE)) + drm_atomic_print_state(state); + + ret = drm_atomic_commit(state); + } + +out: + complete_signaling(dev, state, fence_state, num_fences, !ret); + + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry; + } + + drm_atomic_state_put(state); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; +} Index: sys/dev/drm/core/drm_auth.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_auth.c @@ -0,0 +1,386 @@ +/* + * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Author Rickard E. (Rik) Faith + * Author Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/slab.h> +/* FBSD: Linux header pollution, we need fs.h and lockdep.h */ +#include <linux/fs.h> +#include <linux/lockdep.h> + +#include <drm/drm_auth.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_lease.h> +#include <drm/drm_print.h> + +#include "drm_internal.h" +#include "drm_legacy.h" + +/** + * DOC: master and authentication + * + * &struct drm_master is used to track groups of clients with open + * primary/legacy device nodes. For every &struct drm_file which has at + * least once successfully become the device master (either through the + * SET_MASTER IOCTL, or implicitly through opening the primary device node when + * no one else is the current master at that time) there exists one &drm_master. + * This is noted in &drm_file.is_master. All other clients have just a pointer + * to the &drm_master they are associated with. + * + * In addition only one &drm_master can be the current master for a &drm_device. + * It can be switched through the DROP_MASTER and SET_MASTER IOCTL, or + * implicitly through closing/opening the primary device node. See also + * drm_is_current_master(). + * + * Clients can authenticate against the current master (if it matches their own) + * using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters, + * this allows controlled access to the device for an entire group of mutually + * trusted clients. + */ + +int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_auth *auth = data; + int ret = 0; + + mutex_lock(&dev->master_mutex); + if (!file_priv->magic) { + ret = idr_alloc(&file_priv->master->magic_map, file_priv, + 1, 0, GFP_KERNEL); + if (ret >= 0) + file_priv->magic = ret; + } + auth->magic = file_priv->magic; + mutex_unlock(&dev->master_mutex); + + DRM_DEBUG("%u\n", auth->magic); + + return ret < 0 ? ret : 0; +} + +int drm_authmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_auth *auth = data; + struct drm_file *file; + + DRM_DEBUG("%u\n", auth->magic); + + mutex_lock(&dev->master_mutex); + file = idr_find(&file_priv->master->magic_map, auth->magic); + if (file) { + file->authenticated = 1; + idr_replace(&file_priv->master->magic_map, NULL, auth->magic); + } + mutex_unlock(&dev->master_mutex); + + return file ?
0 : -EINVAL; +} + +struct drm_master *drm_master_create(struct drm_device *dev) +{ + struct drm_master *master; + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return NULL; + + kref_init(&master->refcount); + drm_master_legacy_init(master); + idr_init(&master->magic_map); + master->dev = dev; + + /* initialize the tree of output resource lessees */ + INIT_LIST_HEAD(&master->lessees); + INIT_LIST_HEAD(&master->lessee_list); + idr_init(&master->leases); + idr_init(&master->lessee_idr); + + return master; +} + +static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv, + bool new_master) +{ + int ret = 0; + + dev->master = drm_master_get(fpriv->master); + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, fpriv, new_master); + if (unlikely(ret != 0)) { + drm_master_put(&dev->master); + } + } + + return ret; +} + +static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) +{ + struct drm_master *old_master; + int ret; + + lockdep_assert_held_once(&dev->master_mutex); + + WARN_ON(fpriv->is_master); + old_master = fpriv->master; + fpriv->master = drm_master_create(dev); + if (!fpriv->master) { + fpriv->master = old_master; + return -ENOMEM; + } + + if (dev->driver->master_create) { + ret = dev->driver->master_create(dev, fpriv->master); + if (ret) + goto out_err; + } + fpriv->is_master = 1; + fpriv->authenticated = 1; + + ret = drm_set_master(dev, fpriv, true); + if (ret) + goto out_err; + + if (old_master) + drm_master_put(&old_master); + + return 0; + +out_err: + /* drop references and restore old master on failure */ + drm_master_put(&fpriv->master); + fpriv->master = old_master; + fpriv->is_master = 0; + + return ret; +} + +int drm_setmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret = 0; + + mutex_lock(&dev->master_mutex); + if (drm_is_current_master(file_priv)) + goto out_unlock; + + if (dev->master) { + ret = -EINVAL; + goto out_unlock; + } + + if (!file_priv->master) { + ret = -EINVAL; + goto out_unlock; + } + + if (!file_priv->is_master) { + ret = drm_new_set_master(dev, file_priv); + goto out_unlock; + } + + if (file_priv->master->lessor != NULL) { + DRM_DEBUG_LEASE("Attempt to set lessee %d as master\n", file_priv->master->lessee_id); + ret = -EINVAL; + goto out_unlock; + } + + ret = drm_set_master(dev, file_priv, false); +out_unlock: + mutex_unlock(&dev->master_mutex); + return ret; +} + +static void drm_drop_master(struct drm_device *dev, + struct drm_file *fpriv) +{ + if (dev->driver->master_drop) + dev->driver->master_drop(dev, fpriv); + drm_master_put(&dev->master); +} + +int drm_dropmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret = -EINVAL; + + mutex_lock(&dev->master_mutex); + if (!drm_is_current_master(file_priv)) + goto out_unlock; + + if (!dev->master) + goto out_unlock; + + if (file_priv->master->lessor != NULL) { + DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id); + ret = -EINVAL; + goto out_unlock; + } + + ret = 0; + drm_drop_master(dev, file_priv); +out_unlock: + mutex_unlock(&dev->master_mutex); + return ret; +} + +int drm_master_open(struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + int ret = 0; + + /* if there is no current master make this fd it, but do not create + * any master object for render clients */ + mutex_lock(&dev->master_mutex); + if (!dev->master) + ret = drm_new_set_master(dev, file_priv); + else + file_priv->master = 
drm_master_get(dev->master); + mutex_unlock(&dev->master_mutex); + + return ret; +} + +void drm_master_release(struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_master *master = file_priv->master; + + mutex_lock(&dev->master_mutex); + if (file_priv->magic) + idr_remove(&file_priv->master->magic_map, file_priv->magic); + + if (!drm_is_current_master(file_priv)) + goto out; + + drm_legacy_lock_master_cleanup(dev, master); + + if (dev->master == file_priv->master) + drm_drop_master(dev, file_priv); +out: + if (drm_core_check_feature(dev, DRIVER_MODESET) && file_priv->is_master) { + /* Revoke any leases held by this or lessees, but only if + * this is the "real" master + */ + drm_lease_revoke(master); + } + + /* drop the master reference held by the file priv */ + if (file_priv->master) + drm_master_put(&file_priv->master); + mutex_unlock(&dev->master_mutex); +} + +/** + * drm_is_current_master - checks whether @fpriv is the current master + * @fpriv: DRM file private + * + * Checks whether @fpriv is current master on its device. This decides whether a + * client is allowed to run DRM_MASTER IOCTLs. + * + * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting + * - the current master is assumed to own the non-shareable display hardware. + */ +bool drm_is_current_master(struct drm_file *fpriv) +{ +#ifdef __linux__ + return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master; +#elif defined(__FreeBSD__) + return fpriv->is_master; +#endif +} +EXPORT_SYMBOL(drm_is_current_master); + +/** + * drm_master_get - reference a master pointer + * @master: &struct drm_master + * + * Increments the reference count of @master and returns a pointer to @master. + */ +struct drm_master *drm_master_get(struct drm_master *master) +{ + kref_get(&master->refcount); + return master; +} +EXPORT_SYMBOL(drm_master_get); + +static void drm_master_destroy(struct kref *kref) +{ + struct drm_master *master = container_of(kref, struct drm_master, refcount); + struct drm_device *dev = master->dev; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_lease_destroy(master); + + if (dev->driver->master_destroy) + dev->driver->master_destroy(dev, master); + + drm_legacy_master_rmmaps(dev, master); + + idr_destroy(&master->magic_map); + idr_destroy(&master->leases); + idr_destroy(&master->lessee_idr); + + kfree(master->unique); + kfree(master); +} + +/** + * drm_master_put - unreference and clear a master pointer + * @master: pointer to a pointer of &struct drm_master + * + * This decrements the &drm_master behind @master and sets it to NULL.
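+ * + * A minimal usage sketch (hypothetical caller; m is a placeholder):: + * + * struct drm_master *m = drm_master_get(fpriv->master); + * // ... use m ... + * drm_master_put(&m); // m is NULL afterwards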
+ * + */ +void drm_master_put(struct drm_master **master) +{ + kref_put(&(*master)->refcount, drm_master_destroy); + *master = NULL; +} +EXPORT_SYMBOL(drm_master_put); + +/* Used by drm_client and drm_fb_helper */ +bool drm_master_internal_acquire(struct drm_device *dev) +{ + mutex_lock(&dev->master_mutex); + if (dev->master) { + mutex_unlock(&dev->master_mutex); + return false; + } + + return true; +} +EXPORT_SYMBOL(drm_master_internal_acquire); + +/* Used by drm_client and drm_fb_helper */ +void drm_master_internal_release(struct drm_device *dev) +{ + mutex_unlock(&dev->master_mutex); +} +EXPORT_SYMBOL(drm_master_internal_release); Index: sys/dev/drm/core/drm_blend.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_blend.c @@ -0,0 +1,598 @@ +/* + * Copyright (C) 2016 Samsung Electronics Co.Ltd + * Authors: + * Marek Szyprowski + * + * DRM core plane blending related functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/sort.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_blend.h> +#include <drm/drm_device.h> +#include <drm/drm_print.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * The basic plane composition model supported by standard plane properties only + * has a source rectangle (in logical pixels within the &drm_framebuffer), with + * sub-pixel accuracy, which is scaled up to a pixel-aligned destination + * rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is + * defined by the horizontal and vertical visible pixels (stored in @hdisplay + * and @vdisplay) of the requested mode (stored in &drm_crtc_state.mode). These + * two rectangles are both stored in the &drm_plane_state. + * + * For the atomic ioctl the following standard (atomic) properties on the plane object + * encode the basic plane composition model: + * + * SRC_X: + * X coordinate offset for the source rectangle within the + * &drm_framebuffer, in 16.16 fixed point. Must be positive. + * SRC_Y: + * Y coordinate offset for the source rectangle within the + * &drm_framebuffer, in 16.16 fixed point. Must be positive. + * SRC_W: + * Width for the source rectangle within the &drm_framebuffer, in 16.16 + * fixed point. SRC_X plus SRC_W must be within the width of the source + * framebuffer. Must be positive. + * SRC_H: + * Height for the source rectangle within the &drm_framebuffer, in 16.16 + * fixed point.
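+ * (For illustration: scanning out the full height of a 1080-line + * framebuffer would use SRC_H = 1080 << 16 = 70778880.)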
SRC_Y plus SRC_H must be within the height of the source + * framebuffer. Must be positive. + * CRTC_X: + * X coordinate offset for the destination rectangle. Can be negative. + * CRTC_Y: + * Y coordinate offset for the destination rectangle. Can be negative. + * CRTC_W: + * Width for the destination rectangle. CRTC_X plus CRTC_W can extend past + * the currently visible horizontal area of the &drm_crtc. + * CRTC_H: + * Height for the destination rectangle. CRTC_Y plus CRTC_H can extend past + * the currently visible vertical area of the &drm_crtc. + * FB_ID: + * Mode object ID of the &drm_framebuffer this plane should scan out. + * CRTC_ID: + * Mode object ID of the &drm_crtc this plane should be connected to. + * + * Note that the source rectangle must fully lie within the bounds of the + * &drm_framebuffer. The destination rectangle can lie outside of the visible + * area of the current mode of the CRTC. It must be appropriately clipped by the + * driver, which can be done by calling drm_plane_helper_check_update(). Drivers + * are also allowed to round the subpixel sampling positions appropriately, but + * only to the next full pixel. No pixel outside of the source rectangle may + * ever be sampled, which is important when applying more sophisticated + * filtering than just a bilinear one when scaling. The filtering mode when + * scaling is unspecified. + * + * On top of this basic transformation additional properties can be exposed by + * the driver: + * + * alpha: + * Alpha is set up with drm_plane_create_alpha_property(). It controls the + * plane-wide opacity, from transparent (0) to opaque (0xffff). It can be + * combined with pixel alpha. + * The pixel values in the framebuffers are expected to not be + * pre-multiplied by the global alpha associated with the plane. + * + * rotation: + * Rotation is set up with drm_plane_create_rotation_property(). It adds a + * rotation and reflection step between the source and destination rectangles. + * Without this property the rectangle is only scaled, but not rotated or + * reflected. + * + * Possible values: + * + * "rotate-<degrees>": + * Signals that a drm plane is rotated <degrees> degrees in counter + * clockwise direction. + * + * "reflect-<axis>": + * Signals that the contents of a drm plane is reflected along the + * <axis> axis, in the same way as mirroring. + * + * reflect-x:: + * + * |o | | o| + * | | -> | | + * | v| |v | + * + * reflect-y:: + * + * |o | | ^| + * | | -> | | + * | v| |o | + * + * zpos: + * Z position is set up with drm_plane_create_zpos_immutable_property() and + * drm_plane_create_zpos_property(). It controls the visibility of overlapping + * planes. Without this property the primary plane is always below the cursor + * plane, and ordering between all other planes is undefined. + * + * pixel blend mode: + * Pixel blend mode is set up with drm_plane_create_blend_mode_property(). + * It adds a blend mode for alpha blending equation selection, describing + * how the pixels from the current plane are composited with the + * background.
+ * + * Three alpha blending equations are defined: + * + * "None": + * Blend formula that ignores the pixel alpha:: + * + * out.rgb = plane_alpha * fg.rgb + + * (1 - plane_alpha) * bg.rgb + * + * "Pre-multiplied": + * Blend formula that assumes the pixel color values + * have been already pre-multiplied with the alpha + * channel values:: + * + * out.rgb = plane_alpha * fg.rgb + + * (1 - (plane_alpha * fg.alpha)) * bg.rgb + * + * "Coverage": + * Blend formula that assumes the pixel color values have not + * been pre-multiplied and will do so when blending them to the + * background color values:: + * + * out.rgb = plane_alpha * fg.alpha * fg.rgb + + * (1 - (plane_alpha * fg.alpha)) * bg.rgb + * + * Using the following symbols: + * + * "fg.rgb": + * Each of the RGB component values from the plane's pixel + * "fg.alpha": + * Alpha component value from the plane's pixel. If the plane's + * pixel format has no alpha component, then this is assumed to be + * 1.0. In these cases, this property has no effect, as all three + * equations become equivalent. + * "bg.rgb": + * Each of the RGB component values from the background + * "plane_alpha": + * Plane alpha value set by the plane "alpha" property. If the + * plane does not expose the "alpha" property, then this is + * assumed to be 1.0 + * + * Note that all the property extensions described here apply either to the + * plane or the CRTC (e.g. for the background color, which currently is not + * exposed and assumed to be black). + */ + +/** + * drm_plane_create_alpha_property - create a new alpha property + * @plane: drm plane + * + * This function creates a generic, mutable, alpha property and enables support + * for it in the DRM core. It is attached to @plane. + * + * The alpha property will be allowed to be within the bounds of 0 + * (transparent) to 0xffff (opaque). + * + * Returns: + * 0 on success, negative error code on failure. + */ +int drm_plane_create_alpha_property(struct drm_plane *plane) +{ + struct drm_property *prop; + + prop = drm_property_create_range(plane->dev, 0, "alpha", + 0, DRM_BLEND_ALPHA_OPAQUE); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&plane->base, prop, DRM_BLEND_ALPHA_OPAQUE); + plane->alpha_property = prop; + + if (plane->state) + plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_alpha_property); + +/** + * drm_plane_create_rotation_property - create a new rotation property + * @plane: drm plane + * @rotation: initial value of the rotation property + * @supported_rotations: bitmask of supported rotations and reflections + * + * This creates a new property with the selected support for transformations. + * + * Since a rotation by 180° is the same as reflecting both along the x + * and the y axis the rotation property is somewhat redundant. Drivers can use + * drm_rotation_simplify() to normalize values of this property. + * + * The property exposed to userspace is a bitmask property (see + * drm_property_create_bitmask()) called "rotation" and has the following + * bitmask enumeration values: + * + * DRM_MODE_ROTATE_0: + * "rotate-0" + * DRM_MODE_ROTATE_90: + * "rotate-90" + * DRM_MODE_ROTATE_180: + * "rotate-180" + * DRM_MODE_ROTATE_270: + * "rotate-270" + * DRM_MODE_REFLECT_X: + * "reflect-x" + * DRM_MODE_REFLECT_Y: + * "reflect-y" + * + * Rotation is the specified amount in degrees in counter clockwise direction, + * the X and Y axis are within the source rectangle, i.e. the X/Y axis before + * rotation.
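+ * + * (A hypothetical driver call, for hardware limited to 0/180 rotation plus X + * reflection:: + * + * drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, + * DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X); + * + * where plane stands in for the driver's &drm_plane.) + *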
After reflection, the rotation is applied to the image sampled from + * the source rectangle, before scaling it to fit the destination rectangle. + */ +int drm_plane_create_rotation_property(struct drm_plane *plane, + unsigned int rotation, + unsigned int supported_rotations) +{ + static const struct drm_prop_enum_list props[] = { + { __builtin_ffs(DRM_MODE_ROTATE_0) - 1, "rotate-0" }, + { __builtin_ffs(DRM_MODE_ROTATE_90) - 1, "rotate-90" }, + { __builtin_ffs(DRM_MODE_ROTATE_180) - 1, "rotate-180" }, + { __builtin_ffs(DRM_MODE_ROTATE_270) - 1, "rotate-270" }, + { __builtin_ffs(DRM_MODE_REFLECT_X) - 1, "reflect-x" }, + { __builtin_ffs(DRM_MODE_REFLECT_Y) - 1, "reflect-y" }, + }; + struct drm_property *prop; + + WARN_ON((supported_rotations & DRM_MODE_ROTATE_MASK) == 0); + WARN_ON(!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK)); + WARN_ON(rotation & ~supported_rotations); + + prop = drm_property_create_bitmask(plane->dev, 0, "rotation", + props, ARRAY_SIZE(props), + supported_rotations); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&plane->base, prop, rotation); + + if (plane->state) + plane->state->rotation = rotation; + + plane->rotation_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_rotation_property); + +/** + * drm_rotation_simplify() - Try to simplify the rotation + * @rotation: Rotation to be simplified + * @supported_rotations: Supported rotations + * + * Attempt to simplify the rotation to a form that is supported. + * E.g. if the hardware supports everything except DRM_MODE_REFLECT_X + * one could call this function like this: + * + * drm_rotation_simplify(rotation, DRM_MODE_ROTATE_0 | + * DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | + * DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_Y); + * + * to eliminate the DRM_MODE_REFLECT_X flag. Depending on what kind of + * transforms the hardware supports, this function may not + * be able to produce a supported transform, so the caller should + * check the result afterwards. + */ +unsigned int drm_rotation_simplify(unsigned int rotation, + unsigned int supported_rotations) +{ + if (rotation & ~supported_rotations) { + rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; + rotation = (rotation & DRM_MODE_REFLECT_MASK) | + BIT((ffs(rotation & DRM_MODE_ROTATE_MASK) + 1) + % 4); + } + + return rotation; +} +EXPORT_SYMBOL(drm_rotation_simplify); + +/** + * drm_plane_create_zpos_property - create mutable zpos property + * @plane: drm plane + * @zpos: initial value of zpos property + * @min: minimal possible value of zpos property + * @max: maximal possible value of zpos property + * + * This function initializes generic mutable zpos property and enables support + * for it in drm core. Drivers can then attach this property to planes to enable + * support for configurable planes arrangement during blending operation. + * Drivers that attach a mutable zpos property to any plane should call the + * drm_atomic_normalize_zpos() helper during their implementation of + * &drm_mode_config_funcs.atomic_check(), which will update the normalized zpos + * values and store them in &drm_plane_state.normalized_zpos. Usually min + * should be set to 0 and max to maximal number of planes for given crtc - 1. + * + * If zpos of some planes cannot be changed (like fixed background or + * cursor/topmost planes), the driver should adjust min/max values and assign those + * planes an immutable zpos property with lower or higher values (for more + * information, see the drm_plane_create_zpos_immutable_property() function).
In such a + * case the driver should also assign proper initial zpos values for all planes in + * its plane_reset() callback, so the planes will always be sorted properly. + * + * See also drm_atomic_normalize_zpos(). + * + * The property exposed to userspace is called "zpos". + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_plane_create_zpos_property(struct drm_plane *plane, + unsigned int zpos, + unsigned int min, unsigned int max) +{ + struct drm_property *prop; + + prop = drm_property_create_range(plane->dev, 0, "zpos", min, max); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&plane->base, prop, zpos); + + plane->zpos_property = prop; + + if (plane->state) { + plane->state->zpos = zpos; + plane->state->normalized_zpos = zpos; + } + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_zpos_property); + +/** + * drm_plane_create_zpos_immutable_property - create immutable zpos property + * @plane: drm plane + * @zpos: value of zpos property + * + * This function initializes generic immutable zpos property and enables + * support for it in drm core. Using this property the driver lets userspace + * query the arrangement of the planes for the blending operation and notifies + * it that the hardware (or driver) doesn't support changing the planes' + * order. For mutable zpos see drm_plane_create_zpos_property(). + * + * The property exposed to userspace is called "zpos". + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_plane_create_zpos_immutable_property(struct drm_plane *plane, + unsigned int zpos) +{ + struct drm_property *prop; + + prop = drm_property_create_range(plane->dev, DRM_MODE_PROP_IMMUTABLE, + "zpos", zpos, zpos); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&plane->base, prop, zpos); + + plane->zpos_property = prop; + + if (plane->state) { + plane->state->zpos = zpos; + plane->state->normalized_zpos = zpos; + } + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_zpos_immutable_property); + +static int drm_atomic_state_zpos_cmp(const void *a, const void *b) +{ + const struct drm_plane_state *sa = *(struct drm_plane_state **)a; + const struct drm_plane_state *sb = *(struct drm_plane_state **)b; + + if (sa->zpos != sb->zpos) + return sa->zpos - sb->zpos; + else + return sa->plane->base.id - sb->plane->base.id; +} + +static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +{ + struct drm_atomic_state *state = crtc_state->state; + struct drm_device *dev = crtc->dev; + int total_planes = dev->mode_config.num_total_plane; + struct drm_plane_state **states; + struct drm_plane *plane; + int i, n = 0; + int ret = 0; + + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n", + crtc->base.id, crtc->name); + + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); + if (!states) + return -ENOMEM; + + /* + * The normalization process might create new states for planes whose + * normalized_zpos has to be recalculated.
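+ * + * (For instance, planes with zpos 3, 3 and 7, ties broken by object id, + * end up with normalized_zpos 0, 1 and 2, bottom-most first.)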
+ */ + drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { + struct drm_plane_state *plane_state = + drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto done; + } + states[n++] = plane_state; + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] processing zpos value %d\n", + plane->base.id, plane->name, + plane_state->zpos); + } + + sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL); + + for (i = 0; i < n; i++) { + plane = states[i]->plane; + + states[i]->normalized_zpos = i; + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] normalized zpos value %d\n", + plane->base.id, plane->name, i); + } + crtc_state->zpos_changed = true; + +done: + kfree(states); + return ret; +} + +/** + * drm_atomic_normalize_zpos - calculate normalized zpos values for all crtcs + * @dev: DRM device + * @state: atomic state of DRM device + * + * This function calculates normalized zpos value for all modified planes in + * the provided atomic state of DRM device. + * + * For every CRTC this function checks new states of all planes assigned to + * it and calculates normalized zpos value for these planes. Planes are compared + * first by their zpos values, then by plane id (if zpos is equal). The plane + * with lowest zpos value is at the bottom. The &drm_plane_state.normalized_zpos + * is then filled with unique values from 0 to number of active planes in crtc + * minus one. + * + * RETURNS + * Zero for success or -errno + */ +int drm_atomic_normalize_zpos(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + int i, ret = 0; + + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + crtc = new_plane_state->crtc; + if (!crtc) + continue; + if (old_plane_state->zpos != new_plane_state->zpos) { + new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + new_crtc_state->zpos_changed = true; + } + } + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (old_crtc_state->plane_mask != new_crtc_state->plane_mask || + new_crtc_state->zpos_changed) { + ret = drm_atomic_helper_crtc_normalize_zpos(crtc, + new_crtc_state); + if (ret) + return ret; + } + } + return 0; +} +EXPORT_SYMBOL(drm_atomic_normalize_zpos); + +/** + * drm_plane_create_blend_mode_property - create a new blend mode property + * @plane: drm plane + * @supported_modes: bitmask of supported modes, must include + * BIT(DRM_MODE_BLEND_PREMULTI). Current DRM assumption is + * that alpha is premultiplied, and old userspace can break if + * the property defaults to anything else. + * + * This creates a new property describing the blend mode. + * + * The property exposed to userspace is an enumeration property (see + * drm_property_create_enum()) called "pixel blend mode" and has the + * following enumeration values: + * + * "None": + * Blend formula that ignores the pixel alpha. + * + * "Pre-multiplied": + * Blend formula that assumes the pixel color values have been already + * pre-multiplied with the alpha channel values. + * + * "Coverage": + * Blend formula that assumes the pixel color values have not been + * pre-multiplied and will do so when blending them to the background color + * values. 
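+ * + * (A hypothetical driver call advertising all three modes:: + * + * drm_plane_create_blend_mode_property(plane, + * BIT(DRM_MODE_BLEND_PIXEL_NONE) | + * BIT(DRM_MODE_BLEND_PREMULTI) | + * BIT(DRM_MODE_BLEND_COVERAGE)); + * + * BIT(DRM_MODE_BLEND_PREMULTI) must always be part of the mask.)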
+ * + * RETURNS: + * Zero for success or -errno + */ +int drm_plane_create_blend_mode_property(struct drm_plane *plane, + unsigned int supported_modes) +{ + struct drm_device *dev = plane->dev; + struct drm_property *prop; + static const struct drm_prop_enum_list props[] = { + { DRM_MODE_BLEND_PIXEL_NONE, "None" }, + { DRM_MODE_BLEND_PREMULTI, "Pre-multiplied" }, + { DRM_MODE_BLEND_COVERAGE, "Coverage" }, + }; + unsigned int valid_mode_mask = BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE); + int i; + + if (WARN_ON((supported_modes & ~valid_mode_mask) || + ((supported_modes & BIT(DRM_MODE_BLEND_PREMULTI)) == 0))) + return -EINVAL; + + prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, + "pixel blend mode", + hweight32(supported_modes)); + if (!prop) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + if (!(BIT(props[i].type) & supported_modes)) + continue; + + ret = drm_property_add_enum(prop, props[i].type, + props[i].name); + + if (ret) { + drm_property_destroy(dev, prop); + + return ret; + } + } + + drm_object_attach_property(&plane->base, prop, DRM_MODE_BLEND_PREMULTI); + plane->blend_mode_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_blend_mode_property); Index: sys/dev/drm/core/drm_bridge.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_bridge.c @@ -0,0 +1,496 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/mutex.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_encoder.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * &struct drm_bridge represents a device that hangs on to an encoder. These are + * handy when a regular &drm_encoder entity isn't enough to represent the entire + * encoder chain. + * + * A bridge is always attached to a single &drm_encoder at a time, but can be + * either connected to it directly, or through an intermediate bridge:: + * + * encoder ---> bridge B ---> bridge A + * + * Here, the output of the encoder feeds to bridge B, and that in turn feeds + * bridge A. + * + * The driver using the bridge is responsible for making the associations between + * the encoder and bridges. Once these links are made, the bridges will + * participate along with encoder functions to perform mode_set/enable/disable + * through the ops provided in &drm_bridge_funcs.
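+ * + * (A hypothetical attach sequence building the chain shown above:: + * + * ret = drm_bridge_attach(encoder, bridge_b, NULL); + * if (!ret) + * ret = drm_bridge_attach(encoder, bridge_a, bridge_b); + * + * bridge_a/bridge_b stand in for driver-provided bridges.)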
+ * + * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes, + * CRTCs, encoders or connectors and hence are not visible to userspace. They + * just provide additional hooks to get the desired output at the end of the + * encoder chain. + * + * Bridges can also be chained up using the &drm_bridge.next pointer. + * + * Both legacy CRTC helpers and the new atomic modeset helpers support bridges. + */ + +static DEFINE_MUTEX(bridge_lock); +static LIST_HEAD(bridge_list); + +/** + * drm_bridge_add - add the given bridge to the global bridge list + * + * @bridge: bridge control structure + */ +void drm_bridge_add(struct drm_bridge *bridge) +{ + mutex_lock(&bridge_lock); + list_add_tail(&bridge->list, &bridge_list); + mutex_unlock(&bridge_lock); +} +EXPORT_SYMBOL(drm_bridge_add); + +/** + * drm_bridge_remove - remove the given bridge from the global bridge list + * + * @bridge: bridge control structure + */ +void drm_bridge_remove(struct drm_bridge *bridge) +{ + mutex_lock(&bridge_lock); + list_del_init(&bridge->list); + mutex_unlock(&bridge_lock); +} +EXPORT_SYMBOL(drm_bridge_remove); + +/** + * drm_bridge_attach - attach the bridge to an encoder's chain + * + * @encoder: DRM encoder + * @bridge: bridge to attach + * @previous: previous bridge in the chain (optional) + * + * Called by a kms driver to link the bridge to an encoder's chain. The previous + * argument specifies the previous bridge in the chain. If NULL, the bridge is + * linked directly at the encoder's output. Otherwise it is linked at the + * previous bridge's output. + * + * If non-NULL the previous bridge must be already attached by a call to this + * function. + * + * Note that bridges attached to encoders are auto-detached during encoder + * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally + * *not* be balanced with a drm_bridge_detach() in driver code. + * + * RETURNS: + * Zero on success, error code on failure + */ +int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, + struct drm_bridge *previous) +{ + int ret; + + if (!encoder || !bridge) + return -EINVAL; + + if (previous && (!previous->dev || previous->encoder != encoder)) + return -EINVAL; + + if (bridge->dev) + return -EBUSY; + + bridge->dev = encoder->dev; + bridge->encoder = encoder; + + if (bridge->funcs->attach) { + ret = bridge->funcs->attach(bridge); + if (ret < 0) { + bridge->dev = NULL; + bridge->encoder = NULL; + return ret; + } + } + + if (previous) + previous->next = bridge; + else + encoder->bridge = bridge; + + return 0; +} +EXPORT_SYMBOL(drm_bridge_attach); + +void drm_bridge_detach(struct drm_bridge *bridge) +{ + if (WARN_ON(!bridge)) + return; + + if (WARN_ON(!bridge->dev)) + return; + + if (bridge->funcs->detach) + bridge->funcs->detach(bridge); + + bridge->dev = NULL; +} + +/** + * DOC: bridge callbacks + * + * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM + * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c + * These helpers call a specific &drm_bridge_funcs op for all the bridges + * during encoder configuration. + * + * For detailed specification of the bridge callbacks see &drm_bridge_funcs. 
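+ * + * (Sketch of a minimal, hypothetical ops table; the my_* callbacks are + * placeholders:: + * + * static const struct drm_bridge_funcs my_bridge_funcs = { + * .attach = my_bridge_attach, + * .mode_set = my_bridge_mode_set, + * .enable = my_bridge_enable, + * .disable = my_bridge_disable, + * };)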
+ */ + +/** + * drm_bridge_mode_fixup - fixup proposed mode for all bridges in the + * encoder chain + * @bridge: bridge control structure + * @mode: desired mode to be set for the bridge + * @adjusted_mode: updated mode that works for this bridge + * + * Calls &drm_bridge_funcs.mode_fixup for all the bridges in the + * encoder chain, starting from the first bridge to the last. + * + * Note: the bridge passed should be the one closest to the encoder + * + * RETURNS: + * true on success, false on failure + */ +bool drm_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + bool ret = true; + + if (!bridge) + return true; + + if (bridge->funcs->mode_fixup) + ret = bridge->funcs->mode_fixup(bridge, mode, adjusted_mode); + + ret = ret && drm_bridge_mode_fixup(bridge->next, mode, adjusted_mode); + + return ret; +} +EXPORT_SYMBOL(drm_bridge_mode_fixup); + +/** + * drm_bridge_mode_valid - validate the mode against all bridges in the + * encoder chain. + * @bridge: bridge control structure + * @mode: desired mode to be validated + * + * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder + * chain, starting from the first bridge to the last. If at least one bridge + * does not accept the mode the function returns the error code. + * + * Note: the bridge passed should be the one closest to the encoder. + * + * RETURNS: + * MODE_OK on success, drm_mode_status Enum error code on failure + */ +enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_mode *mode) +{ + enum drm_mode_status ret = MODE_OK; + + if (!bridge) + return ret; + + if (bridge->funcs->mode_valid) + ret = bridge->funcs->mode_valid(bridge, mode); + + if (ret != MODE_OK) + return ret; + + return drm_bridge_mode_valid(bridge->next, mode); +} +EXPORT_SYMBOL(drm_bridge_mode_valid); + +/** + * drm_bridge_disable - disables all bridges in the encoder chain + * @bridge: bridge control structure + * + * Calls &drm_bridge_funcs.disable op for all the bridges in the encoder + * chain, starting from the last bridge to the first. These are called before + * calling the encoder's prepare op. + * + * Note: the bridge passed should be the one closest to the encoder + */ +void drm_bridge_disable(struct drm_bridge *bridge) +{ + if (!bridge) + return; + + drm_bridge_disable(bridge->next); + + if (bridge->funcs->disable) + bridge->funcs->disable(bridge); +} +EXPORT_SYMBOL(drm_bridge_disable); + +/** + * drm_bridge_post_disable - cleans up after disabling all bridges in the encoder chain + * @bridge: bridge control structure + * + * Calls &drm_bridge_funcs.post_disable op for all the bridges in the + * encoder chain, starting from the first bridge to the last. These are called + * after completing the encoder's prepare op. + * + * Note: the bridge passed should be the one closest to the encoder + */ +void drm_bridge_post_disable(struct drm_bridge *bridge) +{ + if (!bridge) + return; + + if (bridge->funcs->post_disable) + bridge->funcs->post_disable(bridge); + + drm_bridge_post_disable(bridge->next); +} +EXPORT_SYMBOL(drm_bridge_post_disable); + +/** + * drm_bridge_mode_set - set proposed mode for all bridges in the + * encoder chain + * @bridge: bridge control structure + * @mode: desired mode to be set for the bridge + * @adjusted_mode: updated mode that works for this bridge + * + * Calls &drm_bridge_funcs.mode_set op for all the bridges in the + * encoder chain, starting from the first bridge to the last. 
+ * + * Note: the bridge passed should be the one closest to the encoder + */ +void drm_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + if (!bridge) + return; + + if (bridge->funcs->mode_set) + bridge->funcs->mode_set(bridge, mode, adjusted_mode); + + drm_bridge_mode_set(bridge->next, mode, adjusted_mode); +} +EXPORT_SYMBOL(drm_bridge_mode_set); + +/** + * drm_bridge_pre_enable - prepares for enabling all + * bridges in the encoder chain + * @bridge: bridge control structure + * + * Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder + * chain, starting from the last bridge to the first. These are called + * before calling the encoder's commit op. + * + * Note: the bridge passed should be the one closest to the encoder + */ +void drm_bridge_pre_enable(struct drm_bridge *bridge) +{ + if (!bridge) + return; + + drm_bridge_pre_enable(bridge->next); + + if (bridge->funcs->pre_enable) + bridge->funcs->pre_enable(bridge); +} +EXPORT_SYMBOL(drm_bridge_pre_enable); + +/** + * drm_bridge_enable - enables all bridges in the encoder chain + * @bridge: bridge control structure + * + * Calls &drm_bridge_funcs.enable op for all the bridges in the encoder + * chain, starting from the first bridge to the last. These are called + * after completing the encoder's commit op. + * + * Note that the bridge passed should be the one closest to the encoder + */ +void drm_bridge_enable(struct drm_bridge *bridge) +{ + if (!bridge) + return; + + if (bridge->funcs->enable) + bridge->funcs->enable(bridge); + + drm_bridge_enable(bridge->next); +} +EXPORT_SYMBOL(drm_bridge_enable); + +/** + * drm_atomic_bridge_disable - disables all bridges in the encoder chain + * @bridge: bridge control structure + * @state: atomic state being committed + * + * Calls &drm_bridge_funcs.atomic_disable (falls back on + * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain, + * starting from the last bridge to the first. These are called before calling + * &drm_encoder_helper_funcs.atomic_disable + * + * Note: the bridge passed should be the one closest to the encoder + */ +void drm_atomic_bridge_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + if (!bridge) + return; + + drm_atomic_bridge_disable(bridge->next, state); + + if (bridge->funcs->atomic_disable) + bridge->funcs->atomic_disable(bridge, state); + else if (bridge->funcs->disable) + bridge->funcs->disable(bridge); +} +EXPORT_SYMBOL(drm_atomic_bridge_disable); + +/** + * drm_atomic_bridge_post_disable - cleans up after disabling all bridges in the + * encoder chain + * @bridge: bridge control structure + * @state: atomic state being committed + * + * Calls &drm_bridge_funcs.atomic_post_disable (falls back on + * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain, + * starting from the first bridge to the last. 
These are called after completing + * &drm_encoder_helper_funcs.atomic_disable + * + * Note: the bridge passed should be the one closest to the encoder + */ +void drm_atomic_bridge_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + if (!bridge) + return; + + if (bridge->funcs->atomic_post_disable) + bridge->funcs->atomic_post_disable(bridge, state); + else if (bridge->funcs->post_disable) + bridge->funcs->post_disable(bridge); + + drm_atomic_bridge_post_disable(bridge->next, state); +} +EXPORT_SYMBOL(drm_atomic_bridge_post_disable); + +/** + * drm_atomic_bridge_pre_enable - prepares for enabling all bridges in the + * encoder chain + * @bridge: bridge control structure + * @state: atomic state being committed + * + * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on + * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain, + * starting from the last bridge to the first. These are called before calling + * &drm_encoder_helper_funcs.atomic_enable + * + * Note: the bridge passed should be the one closest to the encoder + */ +void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + if (!bridge) + return; + + drm_atomic_bridge_pre_enable(bridge->next, state); + + if (bridge->funcs->atomic_pre_enable) + bridge->funcs->atomic_pre_enable(bridge, state); + else if (bridge->funcs->pre_enable) + bridge->funcs->pre_enable(bridge); +} +EXPORT_SYMBOL(drm_atomic_bridge_pre_enable); + +/** + * drm_atomic_bridge_enable - enables all bridges in the encoder chain + * @bridge: bridge control structure + * @state: atomic state being committed + * + * Calls &drm_bridge_funcs.atomic_enable (falls back on + * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain, + * starting from the first bridge to the last. These are called after completing + * &drm_encoder_helper_funcs.atomic_enable + * + * Note: the bridge passed should be the one closest to the encoder + */ +void drm_atomic_bridge_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + if (!bridge) + return; + + if (bridge->funcs->atomic_enable) + bridge->funcs->atomic_enable(bridge, state); + else if (bridge->funcs->enable) + bridge->funcs->enable(bridge); + + drm_atomic_bridge_enable(bridge->next, state); +} +EXPORT_SYMBOL(drm_atomic_bridge_enable); + +#ifdef CONFIG_OF +/** + * of_drm_find_bridge - find the bridge corresponding to the device node in + * the global bridge list + * + * @np: device node + * + * RETURNS: + * drm_bridge control struct on success, NULL on failure + */ +struct drm_bridge *of_drm_find_bridge(struct device_node *np) +{ + struct drm_bridge *bridge; + + mutex_lock(&bridge_lock); + + list_for_each_entry(bridge, &bridge_list, list) { + if (bridge->of_node == np) { + mutex_unlock(&bridge_lock); + return bridge; + } + } + + mutex_unlock(&bridge_lock); + return NULL; +} +EXPORT_SYMBOL(of_drm_find_bridge); +#endif + +MODULE_AUTHOR("Ajay Kumar "); +MODULE_DESCRIPTION("DRM bridge infrastructure"); +MODULE_LICENSE("GPL and additional rights"); Index: sys/dev/drm/core/drm_cache.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_cache.c @@ -0,0 +1,176 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include <linux/export.h> +#include <linux/highmem.h> + +#include <drm/drm_cache.h> + +#if defined(CONFIG_X86) +#include <asm/smp.h> + +/* + * clflushopt is an unordered instruction which needs fencing with mfence or + * sfence to avoid ordering issues. For drm_clflush_page this fencing happens + * in the caller. + */ +static void +drm_clflush_page(struct page *page) +{ + uint8_t *page_virtual; + unsigned int i; + const int size = boot_cpu_data.x86_clflush_size; + + if (unlikely(page == NULL)) + return; + + page_virtual = kmap_atomic(page); + for (i = 0; i < PAGE_SIZE; i += size) + clflushopt(page_virtual + i); + kunmap_atomic(page_virtual); +} + +static void drm_cache_flush_clflush(struct page *pages[], + unsigned long num_pages) +{ + unsigned long i; + + mb(); + for (i = 0; i < num_pages; i++) + drm_clflush_page(*pages++); + mb(); +} +#endif + +/** + * drm_clflush_pages - Flush dcache lines of a set of pages. + * @pages: List of pages to be flushed. + * @num_pages: Number of pages in the array. + * + * Flush every data cache line entry that points to an address belonging + * to a page in the array. + */ +void +drm_clflush_pages(struct page *pages[], unsigned long num_pages) +{ + +#if defined(CONFIG_X86) + if (static_cpu_has(X86_FEATURE_CLFLUSH)) { + drm_cache_flush_clflush(pages, num_pages); + return; + } + + if (wbinvd_on_all_cpus()) + pr_err("Timed out waiting for cache flush\n"); + +#elif defined(__powerpc__) + unsigned long i; + for (i = 0; i < num_pages; i++) { + struct page *page = pages[i]; + void *page_virtual; + + if (unlikely(page == NULL)) + continue; + + page_virtual = kmap_atomic(page); + flush_dcache_range((unsigned long)page_virtual, + (unsigned long)page_virtual + PAGE_SIZE); + kunmap_atomic(page_virtual); + } +#else + pr_err("Architecture has no drm_cache.c support\n"); + WARN_ON_ONCE(1); +#endif +} +EXPORT_SYMBOL(drm_clflush_pages); + +/** + * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather. + * @st: struct sg_table. + * + * Flush every data cache line entry that points to an address in the + * sg.
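+ * + * (Hypothetical usage, flushing the backing pages of a GEM object:: + * + * drm_clflush_sg(obj->sgt); + * + * where obj->sgt stands in for a driver-owned &struct sg_table.)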
+ */ +void +drm_clflush_sg(struct sg_table *st) +{ +#if defined(CONFIG_X86) + if (static_cpu_has(X86_FEATURE_CLFLUSH)) { + struct sg_page_iter sg_iter; + + mb(); + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) + drm_clflush_page(sg_page_iter_page(&sg_iter)); + mb(); + + return; + } + + if (wbinvd_on_all_cpus()) + pr_err("Timed out waiting for cache flush\n"); +#else + pr_err("Architecture has no drm_cache.c support\n"); + WARN_ON_ONCE(1); +#endif +} +EXPORT_SYMBOL(drm_clflush_sg); + +/** + * drm_clflush_virt_range - Flush dcache lines of a region + * @addr: Initial kernel memory address. + * @length: Region size. + * + * Flush every data cache line entry that points to an address in the + * region requested. + */ +void +drm_clflush_virt_range(void *addr, unsigned long length) +{ +#if defined(CONFIG_X86) + if (static_cpu_has(X86_FEATURE_CLFLUSH)) { + const int size = boot_cpu_data.x86_clflush_size; + void *end = addr + length; + addr = (void *)(((unsigned long)addr) & -size); + mb(); + for (; addr < end; addr += size) + clflushopt(addr); + clflushopt(end - 1); /* force serialisation */ + mb(); + return; + } + + if (wbinvd_on_all_cpus()) + pr_err("Timed out waiting for cache flush\n"); +#else + pr_err("Architecture has no drm_cache.c support\n"); + WARN_ON_ONCE(1); +#endif +} +EXPORT_SYMBOL(drm_clflush_virt_range); Index: sys/dev/drm/core/drm_color_mgmt.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_color_mgmt.c @@ -0,0 +1,511 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/uaccess.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_color_mgmt.h> +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <drm/drm_drv.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * Color management or color space adjustment is supported through a set of 5 + * properties on the &drm_crtc object. They are set up by calling + * drm_crtc_enable_color_mgmt(). + * + * “DEGAMMA_LUT”: + * Blob property to set the degamma lookup table (LUT) mapping pixel data + * from the framebuffer before it is given to the transformation matrix. + * The data is interpreted as an array of &struct drm_color_lut elements. + * Hardware might choose not to use the full precision of the LUT elements + * nor use all the elements of the LUT (for example the hardware might + * choose to interpolate between LUT[0] and LUT[4]).
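+ * + * (Illustrative blob contents, a linear 3-entry ramp of &struct drm_color_lut + * elements, each holding 16-bit red/green/blue samples:: + * + * { .red = 0x0000, .green = 0x0000, .blue = 0x0000 }, + * { .red = 0x8000, .green = 0x8000, .blue = 0x8000 }, + * { .red = 0xffff, .green = 0xffff, .blue = 0xffff }, + * + * which describes a pass-thru mapping.)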
+ * + * Setting this to NULL (blob property value set to 0) means a + * linear/pass-thru gamma table should be used. This is generally the + * driver boot-up state too. Drivers can access this blob through + * &drm_crtc_state.degamma_lut. + * + * “DEGAMMA_LUT_SIZE”: + * Unsigned range property to give the size of the lookup table to be set + * on the DEGAMMA_LUT property (the size depends on the underlying + * hardware). If drivers support multiple LUT sizes then they should + * publish the largest size, and sub-sample smaller sized LUTs (e.g. for + * split-gamma modes) appropriately. + * + * “CTM”: + * Blob property to set the current transformation matrix (CTM) applied to + * pixel data after the lookup through the degamma LUT and before the + * lookup through the gamma LUT. The data is interpreted as a struct + * &drm_color_ctm. + * + * Setting this to NULL (blob property value set to 0) means a + * unit/pass-thru matrix should be used. This is generally the driver + * boot-up state too. Drivers can access the blob for the color conversion + * matrix through &drm_crtc_state.ctm. + * + * “GAMMA_LUT”: + * Blob property to set the gamma lookup table (LUT) mapping pixel data + * after the transformation matrix to data sent to the connector. The + * data is interpreted as an array of &struct drm_color_lut elements. + * Hardware might choose not to use the full precision of the LUT elements + * nor use all the elements of the LUT (for example the hardware might + * choose to interpolate between LUT[0] and LUT[4]). + * + * Setting this to NULL (blob property value set to 0) means a + * linear/pass-thru gamma table should be used. This is generally the + * driver boot-up state too. Drivers can access this blob through + * &drm_crtc_state.gamma_lut. + * + * “GAMMA_LUT_SIZE”: + * Unsigned range property to give the size of the lookup table to be set + * on the GAMMA_LUT property (the size depends on the underlying hardware). + * If drivers support multiple LUT sizes then they should publish the + * largest size, and sub-sample smaller sized LUTs (e.g. for split-gamma + * modes) appropriately. + * + * There is also support for a legacy gamma table, which is set up by calling + * drm_mode_crtc_set_gamma_size(). Drivers which support both should use + * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the + * "GAMMA_LUT" property above. + * + * Support for different non-RGB color encodings is controlled through + * &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They + * are set up by calling drm_plane_create_color_properties(). + * + * "COLOR_ENCODING" + * Optional plane enum property to support different non-RGB + * color encodings. The driver can provide a subset of standard + * enum values supported by the DRM plane. + * + * "COLOR_RANGE" + * Optional plane enum property to support different non-RGB + * color parameter ranges. The driver can provide a subset of + * standard enum values supported by the DRM plane. + */ + +/** + * drm_color_lut_extract - clamp and round LUT entries + * @user_input: input value + * @bit_precision: number of bits the hw LUT supports + * + * Extract a degamma/gamma LUT value provided by user (in the form of + * &drm_color_lut entries) and round it to the precision supported by the + * hardware. + */ +uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision) +{ + uint32_t val = user_input; + uint32_t max = 0xffff >> (16 - bit_precision); + + /* Round only if we're not using full precision.
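+ * (For example, with bit_precision = 10: user_input 0xffff becomes + * (0xffff + 0x20) >> 6 = 0x400 and then clamps to max = 0x3ff.)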
*/ + if (bit_precision < 16) { + val += 1UL << (16 - bit_precision - 1); + val >>= 16 - bit_precision; + } + + return clamp_val(val, 0, max); +} +EXPORT_SYMBOL(drm_color_lut_extract); + +/** + * drm_crtc_enable_color_mgmt - enable color management properties + * @crtc: DRM CRTC + * @degamma_lut_size: the size of the degamma lut (before CSC) + * @has_ctm: whether to attach ctm_property for CSC matrix + * @gamma_lut_size: the size of the gamma lut (after CSC) + * + * This function lets the driver enable the color correction + * properties on a CRTC. This includes 3 degamma, csc and gamma + * properties that userspace can set and 2 size properties to inform + * the userspace of the lut sizes. Each of the properties are + * optional. The gamma and degamma properties are only attached if + * their size is not 0 and ctm_property is only attached if has_ctm is + * true. + * + * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the + * legacy &drm_crtc_funcs.gamma_set callback. + */ +void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, + uint degamma_lut_size, + bool has_ctm, + uint gamma_lut_size) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (degamma_lut_size) { + drm_object_attach_property(&crtc->base, + config->degamma_lut_property, 0); + drm_object_attach_property(&crtc->base, + config->degamma_lut_size_property, + degamma_lut_size); + } + + if (has_ctm) + drm_object_attach_property(&crtc->base, + config->ctm_property, 0); + + if (gamma_lut_size) { + drm_object_attach_property(&crtc->base, + config->gamma_lut_property, 0); + drm_object_attach_property(&crtc->base, + config->gamma_lut_size_property, + gamma_lut_size); + } +} +EXPORT_SYMBOL(drm_crtc_enable_color_mgmt); + +/** + * drm_mode_crtc_set_gamma_size - set the gamma table size + * @crtc: CRTC to set the gamma table size for + * @gamma_size: size of the gamma table + * + * Drivers which support gamma tables should set this to the supported gamma + * table size when initializing the CRTC. Currently the drm core only supports a + * fixed gamma table size. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, + int gamma_size) +{ + uint16_t *r_base, *g_base, *b_base; + int i; + + crtc->gamma_size = gamma_size; + + crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3, + GFP_KERNEL); + if (!crtc->gamma_store) { + crtc->gamma_size = 0; + return -ENOMEM; + } + + r_base = crtc->gamma_store; + g_base = r_base + gamma_size; + b_base = g_base + gamma_size; + for (i = 0; i < gamma_size; i++) { + r_base[i] = i << 8; + g_base[i] = i << 8; + b_base[i] = i << 8; + } + + + return 0; +} +EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); + +/** + * drm_mode_gamma_set_ioctl - set the gamma table + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * Set the gamma table of a CRTC to the one passed in by the user. Userspace can + * inquire the required gamma table size through drm_mode_gamma_get_ioctl. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. 
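+ * + * (As an illustration: with gamma_size 256, the identity ramp matching the + * default built by drm_mode_crtc_set_gamma_size() is red[i] = green[i] = + * blue[i] = i << 8 for i in [0, 255].)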
+ */ +int drm_mode_gamma_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_crtc_lut *crtc_lut = data; + struct drm_crtc *crtc; + void *r_base, *g_base, *b_base; + int size; + struct drm_modeset_acquire_ctx ctx; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id); + if (!crtc) + return -ENOENT; + + if (crtc->funcs->gamma_set == NULL) + return -ENOSYS; + + /* memcpy into gamma store */ + if (crtc_lut->gamma_size != crtc->gamma_size) + return -EINVAL; + + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); + + size = crtc_lut->gamma_size * (sizeof(uint16_t)); + r_base = crtc->gamma_store; + if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) { + ret = -EFAULT; + goto out; + } + + g_base = r_base + size; + if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) { + ret = -EFAULT; + goto out; + } + + b_base = g_base + size; + if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) { + ret = -EFAULT; + goto out; + } + + ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, + crtc->gamma_size, &ctx); + +out: + DRM_MODESET_LOCK_ALL_END(ctx, ret); + return ret; + +} + +/** + * drm_mode_gamma_get_ioctl - get the gamma table + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * Copy the current gamma table into the storage provided. This also provides + * the gamma table size the driver expects, which can be used to size the + * allocated storage. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_gamma_get_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_crtc_lut *crtc_lut = data; + struct drm_crtc *crtc; + void *r_base, *g_base, *b_base; + int size; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id); + if (!crtc) + return -ENOENT; + + /* memcpy into gamma store */ + if (crtc_lut->gamma_size != crtc->gamma_size) + return -EINVAL; + + drm_modeset_lock(&crtc->mutex, NULL); + size = crtc_lut->gamma_size * (sizeof(uint16_t)); + r_base = crtc->gamma_store; + if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) { + ret = -EFAULT; + goto out; + } + + g_base = r_base + size; + if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) { + ret = -EFAULT; + goto out; + } + + b_base = g_base + size; + if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) { + ret = -EFAULT; + goto out; + } +out: + drm_modeset_unlock(&crtc->mutex); + return ret; +} + +static const char * const color_encoding_name[] = { + [DRM_COLOR_YCBCR_BT601] = "ITU-R BT.601 YCbCr", + [DRM_COLOR_YCBCR_BT709] = "ITU-R BT.709 YCbCr", + [DRM_COLOR_YCBCR_BT2020] = "ITU-R BT.2020 YCbCr", +}; + +static const char * const color_range_name[] = { + [DRM_COLOR_YCBCR_FULL_RANGE] = "YCbCr full range", + [DRM_COLOR_YCBCR_LIMITED_RANGE] = "YCbCr limited range", +}; + +/** + * drm_get_color_encoding_name - return a string for color encoding + * @encoding: color encoding to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. 
+ */ +const char *drm_get_color_encoding_name(enum drm_color_encoding encoding) +{ + if (WARN_ON(encoding >= ARRAY_SIZE(color_encoding_name))) + return "unknown"; + + return color_encoding_name[encoding]; +} + +/** + * drm_get_color_range_name - return a string for color range + * @range: color range to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. + */ +const char *drm_get_color_range_name(enum drm_color_range range) +{ + if (WARN_ON(range >= ARRAY_SIZE(color_range_name))) + return "unknown"; + + return color_range_name[range]; +} + +/** + * drm_plane_create_color_properties - color encoding related plane properties + * @plane: plane object + * @supported_encodings: bitfield indicating supported color encodings + * @supported_ranges: bitfield indicating supported color ranges + * @default_encoding: default color encoding + * @default_range: default color range + * + * Create and attach plane specific COLOR_ENCODING and COLOR_RANGE + * properties to @plane. The supported encodings and ranges should + * be provided in supported_encodings and supported_ranges bitmasks. + * Each bit set in the bitmask indicates that the enum value with that + * number is supported. + */ +int drm_plane_create_color_properties(struct drm_plane *plane, + u32 supported_encodings, + u32 supported_ranges, + enum drm_color_encoding default_encoding, + enum drm_color_range default_range) +{ + struct drm_device *dev = plane->dev; + struct drm_property *prop; + struct drm_prop_enum_list enum_list[max_t(int, DRM_COLOR_ENCODING_MAX, + DRM_COLOR_RANGE_MAX)]; + int i, len; + + if (WARN_ON(supported_encodings == 0 || + (supported_encodings & -BIT(DRM_COLOR_ENCODING_MAX)) != 0 || + (supported_encodings & BIT(default_encoding)) == 0)) + return -EINVAL; + + if (WARN_ON(supported_ranges == 0 || + (supported_ranges & -BIT(DRM_COLOR_RANGE_MAX)) != 0 || + (supported_ranges & BIT(default_range)) == 0)) + return -EINVAL; + + len = 0; + for (i = 0; i < DRM_COLOR_ENCODING_MAX; i++) { + if ((supported_encodings & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = color_encoding_name[i]; + len++; + } + + prop = drm_property_create_enum(dev, 0, "COLOR_ENCODING", + enum_list, len); + if (!prop) + return -ENOMEM; + plane->color_encoding_property = prop; + drm_object_attach_property(&plane->base, prop, default_encoding); + if (plane->state) + plane->state->color_encoding = default_encoding; + + len = 0; + for (i = 0; i < DRM_COLOR_RANGE_MAX; i++) { + if ((supported_ranges & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = color_range_name[i]; + len++; + } + + prop = drm_property_create_enum(dev, 0, "COLOR_RANGE", + enum_list, len); + if (!prop) + return -ENOMEM; + plane->color_range_property = prop; + drm_object_attach_property(&plane->base, prop, default_range); + if (plane->state) + plane->state->color_range = default_range; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_color_properties); + +/** + * drm_color_lut_check - check validity of lookup table + * @lut: property blob containing LUT to check + * @tests: bitmask of tests to run + * + * Helper to check whether a userspace-provided lookup table is valid and + * satisfies hardware requirements. Drivers pass a bitmask indicating which of + * the tests in &drm_color_lut_tests should be performed. + * + * Returns 0 on success, -EINVAL on failure.
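+ *
+ * A sketch of use from a driver's &drm_crtc_helper_funcs.atomic_check hook,
+ * assuming hardware that needs a monotonic LUT with equal channels::
+ *
+ *     if (crtc_state->gamma_lut) {
+ *         int ret = drm_color_lut_check(crtc_state->gamma_lut,
+ *                                       DRM_COLOR_LUT_EQUAL_CHANNELS |
+ *                                       DRM_COLOR_LUT_NON_DECREASING);
+ *         if (ret)
+ *             return ret;
+ *     }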
+ */ +int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests) +{ + const struct drm_color_lut *entry; + int i; + + if (!lut || !tests) + return 0; + + entry = lut->data; + for (i = 0; i < drm_color_lut_size(lut); i++) { + if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) { + if (entry[i].red != entry[i].blue || + entry[i].red != entry[i].green) { + DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n"); + return -EINVAL; + } + } + + if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) { + if (entry[i].red < entry[i - 1].red || + entry[i].green < entry[i - 1].green || + entry[i].blue < entry[i - 1].blue) { + DRM_DEBUG_KMS("LUT entries must never decrease.\n"); + return -EINVAL; + } + } + } + + return 0; +} +EXPORT_SYMBOL(drm_color_lut_check); Index: sys/dev/drm/core/drm_connector.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_connector.c @@ -0,0 +1,2343 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +/* Due to header pollution in Linux we need those files included */ +#ifdef __FreeBSD__ +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "drm_crtc_internal.h" +#include "drm_internal.h" + +/** + * DOC: overview + * + * In DRM, connectors are the general abstraction for display sinks, and also + * include fixed panels or anything else that can display pixels in some form. As + * opposed to all other KMS objects representing hardware (like CRTC, encoder or + * plane abstractions) connectors can be hotplugged and unplugged at runtime. + * Hence they are reference-counted using drm_connector_get() and + * drm_connector_put(). + * + * A KMS driver must create, initialize, register and attach a &struct + * drm_connector for each such sink. The instance is created like other KMS + * objects, initialized with a call to drm_connector_init() with a pointer to the + * &struct drm_connector_funcs and a connector type, and then exposed to + * userspace with a call to drm_connector_register(). + * + * Connectors must be attached to an encoder to be used. For devices that map + * connectors to encoders 1:1, the connector should be attached at + * initialization time with a call to drm_connector_attach_encoder().
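+ *
+ * A minimal sketch of that sequence (the foo_* names are placeholders)::
+ *
+ *     drm_connector_init(dev, &foo->connector, &foo_connector_funcs,
+ *                        DRM_MODE_CONNECTOR_HDMIA);
+ *     drm_connector_attach_encoder(&foo->connector, &foo->encoder);
+ *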
The + * driver must also set the &drm_connector.encoder field to point to the + * attached encoder. + * + * For connectors which are not fixed (unlike built-in panels) the driver needs to + * support hotplug notifications. The simplest way to do that is by using the + * probe helpers; see drm_kms_helper_poll_init() for connectors which don't have + * hardware support for hotplug interrupts. Connectors with hardware hotplug + * support can instead use e.g. drm_helper_hpd_irq_event(). + */ + +struct drm_conn_prop_enum_list { + int type; + const char *name; + struct ida ida; +}; + +/* + * Connector and encoder types. + */ +static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { + { DRM_MODE_CONNECTOR_Unknown, "Unknown" }, + { DRM_MODE_CONNECTOR_VGA, "VGA" }, + { DRM_MODE_CONNECTOR_DVII, "DVI-I" }, + { DRM_MODE_CONNECTOR_DVID, "DVI-D" }, + { DRM_MODE_CONNECTOR_DVIA, "DVI-A" }, + { DRM_MODE_CONNECTOR_Composite, "Composite" }, + { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" }, + { DRM_MODE_CONNECTOR_LVDS, "LVDS" }, + { DRM_MODE_CONNECTOR_Component, "Component" }, + { DRM_MODE_CONNECTOR_9PinDIN, "DIN" }, + { DRM_MODE_CONNECTOR_DisplayPort, "DP" }, + { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" }, + { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" }, + { DRM_MODE_CONNECTOR_TV, "TV" }, + { DRM_MODE_CONNECTOR_eDP, "eDP" }, + { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" }, + { DRM_MODE_CONNECTOR_DSI, "DSI" }, + { DRM_MODE_CONNECTOR_DPI, "DPI" }, + { DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" }, + { DRM_MODE_CONNECTOR_SPI, "SPI" }, +}; + +void drm_connector_ida_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++) + ida_init(&drm_connector_enum_list[i].ida); +} + +void drm_connector_ida_destroy(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++) + ida_destroy(&drm_connector_enum_list[i].ida); +} + +/** + * drm_connector_get_cmdline_mode - reads the user's cmdline mode + * @connector: connector to query + * + * The kernel supports per-connector configuration of its consoles through + * use of the video= parameter. This function parses that option and + * extracts the user's specified mode (or enable/disable status) for a + * particular connector. This is typically only used during the early fbdev + * setup. + */ +static void drm_connector_get_cmdline_mode(struct drm_connector *connector) +{ + struct drm_cmdline_mode *mode = &connector->cmdline_mode; + char *option = NULL; + + if (fb_get_options(connector->name, &option)) + return; + + if (!drm_mode_parse_command_line_for_connector(option, + connector, + mode)) + return; + + if (mode->force) { + DRM_INFO("forcing %s connector %s\n", connector->name, + drm_get_connector_force_name(mode->force)); + connector->force = mode->force; + } + + DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n", + connector->name, mode->name, + mode->xres, mode->yres, + mode->refresh_specified ? mode->refresh : 60, + mode->rb ? " reduced blanking" : "", + mode->margins ? " with margins" : "", + mode->interlace ?
" interlaced" : ""); +} + +static void drm_connector_free(struct kref *kref) +{ + struct drm_connector *connector = + container_of(kref, struct drm_connector, base.refcount); + struct drm_device *dev = connector->dev; + + drm_mode_object_unregister(dev, &connector->base); + connector->funcs->destroy(connector); +} + +void drm_connector_free_work_fn(struct work_struct *work) +{ + struct drm_connector *connector, *n; + struct drm_device *dev = + container_of(work, struct drm_device, mode_config.connector_free_work); + struct drm_mode_config *config = &dev->mode_config; + unsigned long flags; + struct llist_node *freed; + + spin_lock_irqsave(&config->connector_list_lock, flags); + freed = llist_del_all(&config->connector_free_list); + spin_unlock_irqrestore(&config->connector_list_lock, flags); + + llist_for_each_entry_safe(connector, n, freed, free_node) { + drm_mode_object_unregister(dev, &connector->base); + connector->funcs->destroy(connector); + } +} + +/** + * drm_connector_init - Init a preallocated connector + * @dev: DRM device + * @connector: the connector to init + * @funcs: callbacks for this connector + * @connector_type: user visible type of the connector + * + * Initialises a preallocated connector. Connectors should be + * subclassed as part of driver connector objects. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_connector_init(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type) +{ + struct drm_mode_config *config = &dev->mode_config; + int ret; + struct ida *connector_ida = + &drm_connector_enum_list[connector_type].ida; + + WARN_ON(drm_drv_uses_atomic_modeset(dev) && + (!funcs->atomic_destroy_state || + !funcs->atomic_duplicate_state)); + + ret = __drm_mode_object_add(dev, &connector->base, + DRM_MODE_OBJECT_CONNECTOR, + false, drm_connector_free); + if (ret) + return ret; + + connector->base.properties = &connector->properties; + connector->dev = dev; + connector->funcs = funcs; + + /* connector index is used with 32bit bitmasks */ + ret = ida_simple_get(&config->connector_ida, 0, 32, GFP_KERNEL); + if (ret < 0) { + DRM_DEBUG_KMS("Failed to allocate %s connector index: %d\n", + drm_connector_enum_list[connector_type].name, + ret); + goto out_put; + } + connector->index = ret; + ret = 0; + + connector->connector_type = connector_type; + connector->connector_type_id = + ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); + if (connector->connector_type_id < 0) { + ret = connector->connector_type_id; + goto out_put_id; + } + connector->name = + kasprintf(GFP_KERNEL, "%s-%d", + drm_connector_enum_list[connector_type].name, + connector->connector_type_id); + if (!connector->name) { + ret = -ENOMEM; + goto out_put_type_id; + } + + INIT_LIST_HEAD(&connector->probed_modes); + INIT_LIST_HEAD(&connector->modes); + mutex_init(&connector->mutex); + connector->edid_blob_ptr = NULL; + connector->tile_blob_ptr = NULL; + connector->status = connector_status_unknown; + connector->display_info.panel_orientation = + DRM_MODE_PANEL_ORIENTATION_UNKNOWN; + + drm_connector_get_cmdline_mode(connector); + + /* We should add connectors at the end to avoid upsetting the connector + * index too much. 
*/ + spin_lock_irq(&config->connector_list_lock); + list_add_tail(&connector->head, &config->connector_list); + config->num_connector++; + spin_unlock_irq(&config->connector_list_lock); + + if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL && + connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + drm_connector_attach_edid_property(connector); + + drm_object_attach_property(&connector->base, + config->dpms_property, 0); + + drm_object_attach_property(&connector->base, + config->link_status_property, + 0); + + drm_object_attach_property(&connector->base, + config->non_desktop_property, + 0); + drm_object_attach_property(&connector->base, + config->tile_property, + 0); + + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { + drm_object_attach_property(&connector->base, config->prop_crtc_id, 0); + } + + connector->debugfs_entry = NULL; +out_put_type_id: + if (ret) + ida_simple_remove(connector_ida, connector->connector_type_id); +out_put_id: + if (ret) + ida_simple_remove(&config->connector_ida, connector->index); +out_put: + if (ret) + drm_mode_object_unregister(dev, &connector->base); + + return ret; +} +EXPORT_SYMBOL(drm_connector_init); + +/** + * drm_connector_init_with_ddc - Init a preallocated connector + * @dev: DRM device + * @connector: the connector to init + * @funcs: callbacks for this connector + * @connector_type: user visible type of the connector + * @ddc: pointer to the associated ddc adapter + * + * Initialises a preallocated connector. Connectors should be + * subclassed as part of driver connector objects. + * + * Ensures that the ddc field of the connector is correctly set. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_connector_init_with_ddc(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type, + struct i2c_adapter *ddc) +{ + int ret; + + ret = drm_connector_init(dev, connector, funcs, connector_type); + if (ret) + return ret; + + /* provide ddc symlink in sysfs */ + connector->ddc = ddc; + + return ret; +} +EXPORT_SYMBOL(drm_connector_init_with_ddc); + +/** + * drm_connector_attach_edid_property - attach edid property. + * @connector: the connector + * + * Some connector types like DRM_MODE_CONNECTOR_VIRTUAL do not get a + * edid property attached by default. This function can be used to + * explicitly enable the edid property in these cases. + */ +void drm_connector_attach_edid_property(struct drm_connector *connector) +{ + struct drm_mode_config *config = &connector->dev->mode_config; + + drm_object_attach_property(&connector->base, + config->edid_property, + 0); +} +EXPORT_SYMBOL(drm_connector_attach_edid_property); + +/** + * drm_connector_attach_encoder - attach a connector to an encoder + * @connector: connector to attach + * @encoder: encoder to attach @connector to + * + * This function links up a connector to an encoder. Note that the routing + * restrictions between encoders and crtcs are exposed to userspace through the + * possible_clones and possible_crtcs bitmasks. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_connector_attach_encoder(struct drm_connector *connector, + struct drm_encoder *encoder) +{ + int i; + + /* + * In the past, drivers have attempted to model the static association + * of connector to encoder in simple connector/encoder devices using a + * direct assignment of connector->encoder = encoder. 
This connection + * is a logical one and the responsibility of the core, so drivers are + * expected not to mess with this. + * + * Note that the error return should've been enough here, but a large + * majority of drivers ignore the return value, so add in a big WARN + * to get people's attention. + */ + if (WARN_ON(connector->encoder)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) { + if (connector->encoder_ids[i] == 0) { + connector->encoder_ids[i] = encoder->base.id; + return 0; + } + } + return -ENOMEM; +} +EXPORT_SYMBOL(drm_connector_attach_encoder); + +/** + * drm_connector_has_possible_encoder - check if the connector and encoder are associated with each other + * @connector: the connector + * @encoder: the encoder + * + * Returns: + * True if @encoder is one of the possible encoders for @connector. + */ +bool drm_connector_has_possible_encoder(struct drm_connector *connector, + struct drm_encoder *encoder) +{ + struct drm_encoder *enc; + int i; + + drm_connector_for_each_possible_encoder(connector, enc, i) { + if (enc == encoder) + return true; + } + + return false; +} +EXPORT_SYMBOL(drm_connector_has_possible_encoder); + +static void drm_mode_remove(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + list_del(&mode->head); + drm_mode_destroy(connector->dev, mode); +} + +/** + * drm_connector_cleanup - cleans up an initialised connector + * @connector: connector to cleanup + * + * Cleans up the connector but doesn't free the object. + */ +void drm_connector_cleanup(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode, *t; + + /* The connector should have been removed from userspace long before + * it is finally destroyed. + */ + if (WARN_ON(connector->registration_state == + DRM_CONNECTOR_REGISTERED)) + drm_connector_unregister(connector); + + if (connector->tile_group) { + drm_mode_put_tile_group(dev, connector->tile_group); + connector->tile_group = NULL; + } + + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) + drm_mode_remove(connector, mode); + + list_for_each_entry_safe(mode, t, &connector->modes, head) + drm_mode_remove(connector, mode); + + ida_simple_remove(&drm_connector_enum_list[connector->connector_type].ida, + connector->connector_type_id); + + ida_simple_remove(&dev->mode_config.connector_ida, + connector->index); + + kfree(connector->display_info.bus_formats); + drm_mode_object_unregister(dev, &connector->base); + kfree(connector->name); + connector->name = NULL; + spin_lock_irq(&dev->mode_config.connector_list_lock); + list_del(&connector->head); + dev->mode_config.num_connector--; + spin_unlock_irq(&dev->mode_config.connector_list_lock); + + WARN_ON(connector->state && !connector->funcs->atomic_destroy_state); + if (connector->state && connector->funcs->atomic_destroy_state) + connector->funcs->atomic_destroy_state(connector, + connector->state); + + mutex_destroy(&connector->mutex); + + memset(connector, 0, sizeof(*connector)); +} +EXPORT_SYMBOL(drm_connector_cleanup); + +/** + * drm_connector_register - register a connector + * @connector: the connector to register + * + * Register userspace interfaces for a connector. + * + * Returns: + * Zero on success, error code on failure.
+ */ +int drm_connector_register(struct drm_connector *connector) +{ + int ret = 0; + + if (!connector->dev->registered) + return 0; + + mutex_lock(&connector->mutex); + if (connector->registration_state != DRM_CONNECTOR_INITIALIZING) + goto unlock; + + ret = drm_sysfs_connector_add(connector); + if (ret) + goto unlock; + + drm_debugfs_connector_add(connector); + + if (connector->funcs->late_register) { + ret = connector->funcs->late_register(connector); + if (ret) + goto err_debugfs; + } + + drm_mode_object_register(connector->dev, &connector->base); + + connector->registration_state = DRM_CONNECTOR_REGISTERED; + goto unlock; + +err_debugfs: + drm_debugfs_connector_remove(connector); + drm_sysfs_connector_remove(connector); +unlock: + mutex_unlock(&connector->mutex); + return ret; +} +EXPORT_SYMBOL(drm_connector_register); + +/** + * drm_connector_unregister - unregister a connector + * @connector: the connector to unregister + * + * Unregister userspace interfaces for a connector + */ +void drm_connector_unregister(struct drm_connector *connector) +{ + mutex_lock(&connector->mutex); + if (connector->registration_state != DRM_CONNECTOR_REGISTERED) { + mutex_unlock(&connector->mutex); + return; + } + + if (connector->funcs->early_unregister) + connector->funcs->early_unregister(connector); + + drm_sysfs_connector_remove(connector); + drm_debugfs_connector_remove(connector); + + connector->registration_state = DRM_CONNECTOR_UNREGISTERED; + mutex_unlock(&connector->mutex); +} +EXPORT_SYMBOL(drm_connector_unregister); + +void drm_connector_unregister_all(struct drm_device *dev) +{ + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) + drm_connector_unregister(connector); + drm_connector_list_iter_end(&conn_iter); +} + +int drm_connector_register_all(struct drm_device *dev) +{ + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + int ret = 0; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + ret = drm_connector_register(connector); + if (ret) + break; + } + drm_connector_list_iter_end(&conn_iter); + + if (ret) + drm_connector_unregister_all(dev); + return ret; +} + +/** + * drm_get_connector_status_name - return a string for connector status + * @status: connector status to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. + */ +const char *drm_get_connector_status_name(enum drm_connector_status status) +{ + if (status == connector_status_connected) + return "connected"; + else if (status == connector_status_disconnected) + return "disconnected"; + else + return "unknown"; +} +EXPORT_SYMBOL(drm_get_connector_status_name); + +/** + * drm_get_connector_force_name - return a string for connector force + * @force: connector force to get name of + * + * Returns: const pointer to name. 
+ */ +const char *drm_get_connector_force_name(enum drm_connector_force force) +{ + switch (force) { + case DRM_FORCE_UNSPECIFIED: + return "unspecified"; + case DRM_FORCE_OFF: + return "off"; + case DRM_FORCE_ON: + return "on"; + case DRM_FORCE_ON_DIGITAL: + return "digital"; + default: + return "unknown"; + } +} + +#ifdef CONFIG_LOCKDEP +static struct lockdep_map connector_list_iter_dep_map = { + .name = "drm_connector_list_iter" +}; +#endif + +/** + * drm_connector_list_iter_begin - initialize a connector_list iterator + * @dev: DRM device + * @iter: connector_list iterator + * + * Sets @iter up to walk the &drm_mode_config.connector_list of @dev. @iter + * must always be cleaned up again by calling drm_connector_list_iter_end(). + * Iteration itself happens using drm_connector_list_iter_next() or + * drm_for_each_connector_iter(). + */ +void drm_connector_list_iter_begin(struct drm_device *dev, + struct drm_connector_list_iter *iter) +{ + iter->dev = dev; + iter->conn = NULL; + lock_acquire_shared_recursive(&connector_list_iter_dep_map, 0, 1, NULL, _RET_IP_); +} +EXPORT_SYMBOL(drm_connector_list_iter_begin); + +/* + * Extra-safe connector put function that works in any context. Should only be + * used from the connector_iter functions, where we never really expect to + * actually release the connector when dropping our final reference. + */ +static void +__drm_connector_put_safe(struct drm_connector *conn) +{ + struct drm_mode_config *config = &conn->dev->mode_config; + +#ifdef __linux__ + lockdep_assert_held(&config->connector_list_lock); +#endif + + if (!refcount_dec_and_test(&conn->base.refcount.refcount)) + return; + + llist_add(&conn->free_node, &config->connector_free_list); + schedule_work(&config->connector_free_work); +} + +/** + * drm_connector_list_iter_next - return next connector + * @iter: connector_list iterator + * + * Returns the next connector for @iter, or NULL when the list walk has + * completed. + */ +struct drm_connector * +drm_connector_list_iter_next(struct drm_connector_list_iter *iter) +{ + struct drm_connector *old_conn = iter->conn; + struct drm_mode_config *config = &iter->dev->mode_config; + struct list_head *lhead; + unsigned long flags; + + spin_lock_irqsave(&config->connector_list_lock, flags); + lhead = old_conn ? &old_conn->head : &config->connector_list; + + do { + if (lhead->next == &config->connector_list) { + iter->conn = NULL; + break; + } + + lhead = lhead->next; + iter->conn = list_entry(lhead, struct drm_connector, head); + + /* loop until it's not a zombie connector */ + } while (!kref_get_unless_zero(&iter->conn->base.refcount)); + + if (old_conn) + __drm_connector_put_safe(old_conn); + spin_unlock_irqrestore(&config->connector_list_lock, flags); + + return iter->conn; +} +EXPORT_SYMBOL(drm_connector_list_iter_next); + +/** + * drm_connector_list_iter_end - tear down a connector_list iterator + * @iter: connector_list iterator + * + * Tears down @iter and releases any resources (like &drm_connector references) + * acquired while walking the list. This must always be called, both when the + * iteration completes fully and when it was aborted without walking the entire + * list.
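+ *
+ * The complete iteration pattern therefore looks like this (counting
+ * connectors as a stand-in for real per-connector work; the same pattern is
+ * used by drm_connector_register_all() below)::
+ *
+ *     struct drm_connector_list_iter conn_iter;
+ *     struct drm_connector *connector;
+ *     int count = 0;
+ *
+ *     drm_connector_list_iter_begin(dev, &conn_iter);
+ *     drm_for_each_connector_iter(connector, &conn_iter)
+ *         count++;
+ *     drm_connector_list_iter_end(&conn_iter);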
+ */ +void drm_connector_list_iter_end(struct drm_connector_list_iter *iter) +{ + struct drm_mode_config *config = &iter->dev->mode_config; + unsigned long flags; + + iter->dev = NULL; + if (iter->conn) { + spin_lock_irqsave(&config->connector_list_lock, flags); + __drm_connector_put_safe(iter->conn); + spin_unlock_irqrestore(&config->connector_list_lock, flags); + } + lock_release(&connector_list_iter_dep_map, 0, _RET_IP_); +} +EXPORT_SYMBOL(drm_connector_list_iter_end); + +static const struct drm_prop_enum_list drm_subpixel_enum_list[] = { + { SubPixelUnknown, "Unknown" }, + { SubPixelHorizontalRGB, "Horizontal RGB" }, + { SubPixelHorizontalBGR, "Horizontal BGR" }, + { SubPixelVerticalRGB, "Vertical RGB" }, + { SubPixelVerticalBGR, "Vertical BGR" }, + { SubPixelNone, "None" }, +}; + +/** + * drm_get_subpixel_order_name - return a string for a given subpixel enum + * @order: enum of subpixel_order + * + * Note you could abuse this and return something out of bounds, but that + * would be a caller error. No unscrubbed user data should make it here. + */ +const char *drm_get_subpixel_order_name(enum subpixel_order order) +{ + return drm_subpixel_enum_list[order].name; +} +EXPORT_SYMBOL(drm_get_subpixel_order_name); + +static const struct drm_prop_enum_list drm_dpms_enum_list[] = { + { DRM_MODE_DPMS_ON, "On" }, + { DRM_MODE_DPMS_STANDBY, "Standby" }, + { DRM_MODE_DPMS_SUSPEND, "Suspend" }, + { DRM_MODE_DPMS_OFF, "Off" } +}; +DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) + +static const struct drm_prop_enum_list drm_link_status_enum_list[] = { + { DRM_MODE_LINK_STATUS_GOOD, "Good" }, + { DRM_MODE_LINK_STATUS_BAD, "Bad" }, +}; + +/** + * drm_display_info_set_bus_formats - set the supported bus formats + * @info: display info to store bus formats in + * @formats: array containing the supported bus formats + * @num_formats: the number of entries in the @formats array + * + * Store the supported bus formats in display info structure. + * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for + * a full list of available formats. + */ +int drm_display_info_set_bus_formats(struct drm_display_info *info, + const u32 *formats, + unsigned int num_formats) +{ + u32 *fmts = NULL; + + if (!formats && num_formats) + return -EINVAL; + + if (formats && num_formats) { + fmts = kmemdup(formats, sizeof(*formats) * num_formats, + GFP_KERNEL); + if (!fmts) + return -ENOMEM; + } + + kfree(info->bus_formats); + info->bus_formats = fmts; + info->num_bus_formats = num_formats; + + return 0; +} +EXPORT_SYMBOL(drm_display_info_set_bus_formats); + +/* Optional connector properties.
*/ +static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { + { DRM_MODE_SCALE_NONE, "None" }, + { DRM_MODE_SCALE_FULLSCREEN, "Full" }, + { DRM_MODE_SCALE_CENTER, "Center" }, + { DRM_MODE_SCALE_ASPECT, "Full aspect" }, +}; + +static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = { + { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" }, + { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" }, + { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" }, +}; + +static const struct drm_prop_enum_list drm_content_type_enum_list[] = { + { DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" }, + { DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" }, + { DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" }, + { DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" }, + { DRM_MODE_CONTENT_TYPE_GAME, "Game" }, +}; + +static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = { + { DRM_MODE_PANEL_ORIENTATION_NORMAL, "Normal" }, + { DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down" }, + { DRM_MODE_PANEL_ORIENTATION_LEFT_UP, "Left Side Up" }, + { DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, "Right Side Up" }, +}; + +static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ + { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ +}; +DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) + +static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ + { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ +}; +DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, + drm_dvi_i_subconnector_enum_list) + +static const struct drm_prop_enum_list drm_tv_select_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ +}; +DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) + +static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ +}; +DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, + drm_tv_subconnector_enum_list) + +static const struct drm_prop_enum_list hdmi_colorspaces[] = { + /* For Default case, driver will set the colorspace */ + { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, + /* Standard Definition Colorimetry based on CEA 861 */ + { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" }, + { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" }, + /* Standard Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" }, + /* High Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" }, + /* Colorimetry based on IEC 61966-2-1/Amendment 1 */ + { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" }, + /* Colorimetry based on IEC 61966-2-5 [33] */ + { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" }, + /* Colorimetry based on IEC 61966-2-5 */ + { DRM_MODE_COLORIMETRY_OPRGB, 
"opRGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" }, + /* Added as part of Additional Colorimetry Extension in 861.G */ + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" }, + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" }, +}; + +/** + * DOC: standard connector properties + * + * DRM connectors have a few standardized properties: + * + * EDID: + * Blob property which contains the current EDID read from the sink. This + * is useful to parse sink identification information like vendor, model + * and serial. Drivers should update this property by calling + * drm_connector_update_edid_property(), usually after having parsed + * the EDID using drm_add_edid_modes(). Userspace cannot change this + * property. + * DPMS: + * Legacy property for setting the power state of the connector. For atomic + * drivers this is only provided for backwards compatibility with existing + * drivers, it remaps to controlling the "ACTIVE" property on the CRTC the + * connector is linked to. Drivers should never set this property directly, + * it is handled by the DRM core by calling the &drm_connector_funcs.dpms + * callback. For atomic drivers the remapping to the "ACTIVE" property is + * implemented in the DRM core. This is the only standard connector + * property that userspace can change. + * + * Note that this property cannot be set through the MODE_ATOMIC ioctl, + * userspace must use "ACTIVE" on the CRTC instead. + * + * WARNING: + * + * For userspace also running on legacy drivers the "DPMS" semantics are a + * lot more complicated. First, userspace cannot rely on the "DPMS" value + * returned by the GETCONNECTOR actually reflecting reality, because many + * drivers fail to update it. For atomic drivers this is taken care of in + * drm_atomic_helper_update_legacy_modeset_state(). + * + * The second issue is that the DPMS state is only well-defined when the + * connector is connected to a CRTC. In atomic the DRM core enforces that + * "ACTIVE" is off in such a case, no such checks exists for "DPMS". + * + * Finally, when enabling an output using the legacy SETCONFIG ioctl then + * "DPMS" is forced to ON. But see above, that might not be reflected in + * the software value on legacy drivers. + * + * Summarizing: Only set "DPMS" when the connector is known to be enabled, + * assume that a successful SETCONFIG call also sets "DPMS" to on, and + * never read back the value of "DPMS" because it can be incorrect. + * PATH: + * Connector path property to identify how this sink is physically + * connected. Used by DP MST. This should be set by calling + * drm_connector_set_path_property(), in the case of DP MST with the + * path property the MST manager created. Userspace cannot change this + * property. + * TILE: + * Connector tile group property to indicate how a set of DRM connector + * compose together into one logical screen. This is used by both high-res + * external screens (often only using a single cable, but exposing multiple + * DP MST sinks), or high-res integrated panels (like dual-link DSI) which + * are not gen-locked. Note that for tiled panels which are genlocked, like + * dual-link LVDS or dual-link DSI, the driver should try to not expose the + * tiling and virtualize both &drm_crtc and &drm_plane if needed. 
Drivers + * should update this value using drm_connector_set_tile_property(). + * Userspace cannot change this property. + * link-status: + * Connector link-status property to indicate the status of the link. The + * default value of link-status is "GOOD". If something fails during or + * after modeset, the kernel driver may set this to "BAD" and issue a + * hotplug uevent. Drivers should update this value using + * drm_connector_set_link_status_property(). + * non_desktop: + * Indicates the output should be ignored for purposes of displaying a + * standard desktop environment or console. This is most likely because + * the output device is not rectilinear. + * Content Protection: + * This property is used by userspace to request the kernel protect future + * content communicated over the link. When requested, the kernel will apply + * the appropriate means of protection (most often HDCP), and use the + * property to tell userspace the protection is active. + * + * Drivers can set this up by calling + * drm_connector_attach_content_protection_property() on initialization. + * + * The value of this property can be one of the following: + * + * DRM_MODE_CONTENT_PROTECTION_UNDESIRED = 0 + * The link is not protected, content is transmitted in the clear. + * DRM_MODE_CONTENT_PROTECTION_DESIRED = 1 + * Userspace has requested content protection, but the link is not + * currently protected. When in this state, the kernel should enable + * Content Protection as soon as possible. + * DRM_MODE_CONTENT_PROTECTION_ENABLED = 2 + * Userspace has requested content protection, and the link is + * protected. Only the driver can set the property to this value. + * If userspace attempts to set it to ENABLED, the kernel will return + * -EINVAL. + * + * A few guidelines: + * + * - DESIRED state should be preserved until userspace de-asserts it by + * setting the property to UNDESIRED. This means ENABLED should only + * transition to UNDESIRED when the user explicitly requests it. + * - If the state is DESIRED, the kernel should attempt to re-authenticate the + * link whenever possible. This includes across disable/enable, dpms, + * hotplug, downstream device changes, link status failures, etc. + * - The kernel sends a uevent with the connector id and property id through + * drm_hdcp_update_content_protection(), in the following kernel-triggered + * scenarios: + * + * - DESIRED -> ENABLED (authentication success) + * - ENABLED -> DESIRED (termination of authentication) + * - Note that no uevents are sent for userspace-triggered property state + * changes (which can't fail), such as + * + * - DESIRED/ENABLED -> UNDESIRED + * - UNDESIRED -> DESIRED + * - Userspace is responsible for polling the property or listening to uevents + * to determine when the value transitions from ENABLED to DESIRED. + * This signifies the link is no longer protected and userspace should + * take appropriate action (whatever that might be). + * + * HDCP Content Type: + * This enum property is used by userspace to declare the content type + * of the display stream to the kernel. Here display stream stands for any + * display content that userspace intends to display through HDCP + * encryption. + * + * Content Type of a stream is decided by the owner of the stream, as + * "HDCP Type0" or "HDCP Type1".
+ * + * The value of the property can be one of the following: + * - "HDCP Type0": DRM_MODE_HDCP_CONTENT_TYPE0 = 0 + * - "HDCP Type1": DRM_MODE_HDCP_CONTENT_TYPE1 = 1 + * + * When the kernel starts the HDCP authentication (see "Content Protection" + * for details), it uses the content type in "HDCP Content Type" + * for performing the HDCP authentication with the display sink. + * + * Note that a link can be authenticated with HDCP 2.2 for either Content + * Type 0 or Content Type 1, whereas a link can be authenticated with + * HDCP 1.4 only for Content Type 0 (implicitly, as HDCP 1.4 has no notion + * of Content Type). + * + * The HDCP 2.2 authentication protocol itself takes the "Content Type" as a + * parameter, which is an input to the DP HDCP 2.2 encryption algorithm. + * + * In case of a Type 0 content protection request, the kernel driver can choose + * either of the HDCP spec versions 1.4 and 2.2. When HDCP 2.2 is used for + * "HDCP Type 0", an HDCP 2.2-capable repeater in the downstream can send + * that content to an HDCP 1.4-authenticated HDCP sink (Type0 link). + * But if the content is classified as "HDCP Type 1", the above-mentioned + * HDCP 2.2 repeater won't send the content to the HDCP sink, as it can't + * authenticate an HDCP 1.4-capable sink for "HDCP Type 1". + * + * Note that userspace can be ignorant of the HDCP versions used by the + * kernel driver to achieve the "HDCP Content Type". + * + * Currently, classifying content as Type 1 ensures that the + * content will be displayed only through an HDCP 2.2 encrypted link. + * + * Note that the HDCP Content Type property is introduced with HDCP 2.2, and + * defaults to type 0. It is only exposed by drivers supporting HDCP 2.2 + * (hence supporting Type 0 and Type 1). Based on how future versions of the + * HDCP specs are defined, Content Type could be used for higher versions + * too. + * + * If the content type is changed when "Content Protection" is not UNDESIRED, + * then the kernel will disable HDCP and re-enable it with the new type in the + * same atomic commit. And when "Content Protection" is ENABLED, it means + * that the link is HDCP authenticated and encrypted, for the transmission of + * the type of stream mentioned in "HDCP Content Type". + * + * HDR_OUTPUT_METADATA: + * Connector property to enable userspace to send HDR Metadata to the + * driver. This metadata is based on the composition and blending + * policies decided by the user, taking into account the hardware and + * sink capabilities. The driver gets this metadata and creates a + * Dynamic Range and Mastering Infoframe (DRM) in case of HDMI, or an + * SDP packet (Non-audio INFOFRAME SDP v1.3) for DP. This is then + * sent to the sink. This notifies the sink of the upcoming frame's Color + * Encoding and Luminance parameters. + * + * Userspace first needs to detect the HDR capabilities of the sink by + * reading and parsing the EDID. Details of HDR metadata for HDMI + * are given in the CTA 861.G spec; for DP, they are defined in the VESA DP + * Standard v1.4. It then needs to get the metadata information + * of the video/game/app content which is encoded in HDR (basically + * using HDR transfer functions). With this information it needs to + * decide on a blending policy and compose the relevant + * layers/overlays into a common format. Once this blending is done, + * userspace will be aware of the metadata of the composed frame to + * be sent to the sink.
It then uses this property to communicate this + * metadata to the driver, which then builds an infoframe packet and sends + * it to the sink based on the type of encoder connected. + * + * Userspace is responsible for the tone mapping operation in case: + * - Some layers are HDR and others are SDR + * - The HDR layers' luminance is not the same as the sink's + * + * It will even need to do colorspace conversion and get all layers + * to one common colorspace for blending. It can use either GL, Media + * or display engine to get this done based on the capabilities of the + * associated hardware. + * + * The driver expects the metadata in a &struct hdr_output_metadata + * from userspace. This is received as a blob and stored in + * &drm_connector_state.hdr_output_metadata. It parses the EDID and saves the + * sink metadata in &struct hdr_sink_metadata, as + * &drm_connector.hdr_sink_metadata. In case of an HDMI encoder, the driver + * uses the drm_hdmi_infoframe_set_hdr_metadata() helper to set the HDR + * metadata and hdmi_drm_infoframe_pack() to pack the infoframe as per the + * spec. + * + * max bpc: + * This range property is used by userspace to limit the bit depth. When + * used, the driver limits the bpc in accordance with the valid range + * supported by the hardware and sink. Drivers should use + * drm_connector_attach_max_bpc_property() to create and attach the + * property to the connector during initialization. + * + * Connectors also have one standardized atomic property: + * + * CRTC_ID: + * Mode object ID of the &drm_crtc this connector should be connected to. + * + * Connectors for LCD panels may also have one standardized property: + * + * panel orientation: + * On some devices the LCD panel is mounted in the casing in such a way + * that the up/top side of the panel does not match the top side of + * the device. Userspace can use this property to check for this. + * Note that input coordinates from touchscreens (input devices with + * INPUT_PROP_DIRECT) will still map 1:1 to the actual LCD panel + * coordinates, so if userspace rotates the picture to adjust for + * the orientation it must also apply the same transformation to the + * touchscreen input coordinates. This property is initialized by calling + * drm_connector_init_panel_orientation_property(). + * + * scaling mode: + * This property defines how a non-native mode is upscaled to the native + * mode of an LCD panel: + * + * None: + * No upscaling happens, scaling is left to the panel. Not all + * drivers expose this mode. + * Full: + * The output is upscaled to the full resolution of the panel, + * ignoring the aspect ratio. + * Center: + * No upscaling happens, the output is centered within the native + * resolution of the panel. + * Full aspect: + * The output is upscaled to maximize either the width or height + * while retaining the aspect ratio. + * + * This property should be set up by calling + * drm_connector_attach_scaling_mode_property(). Note that drivers + * can also expose this property to external outputs, in which case they + * must support "None", which should be the default (since external screens + * have a built-in scaler).
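+ *
+ * As a sketch, a panel driver supporting all four scaling modes would attach
+ * the property like this::
+ *
+ *     drm_connector_attach_scaling_mode_property(connector,
+ *             BIT(DRM_MODE_SCALE_NONE) |
+ *             BIT(DRM_MODE_SCALE_FULLSCREEN) |
+ *             BIT(DRM_MODE_SCALE_CENTER) |
+ *             BIT(DRM_MODE_SCALE_ASPECT));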
+ */ + +int drm_connector_create_standard_properties(struct drm_device *dev) +{ + struct drm_property *prop; + + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "EDID", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.edid_property = prop; + + prop = drm_property_create_enum(dev, 0, + "DPMS", drm_dpms_enum_list, + ARRAY_SIZE(drm_dpms_enum_list)); + if (!prop) + return -ENOMEM; + dev->mode_config.dpms_property = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "PATH", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.path_property = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "TILE", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.tile_property = prop; + + prop = drm_property_create_enum(dev, 0, "link-status", + drm_link_status_enum_list, + ARRAY_SIZE(drm_link_status_enum_list)); + if (!prop) + return -ENOMEM; + dev->mode_config.link_status_property = prop; + + prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop"); + if (!prop) + return -ENOMEM; + dev->mode_config.non_desktop_property = prop; + + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, + "HDR_OUTPUT_METADATA", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.hdr_output_metadata_property = prop; + + return 0; +} + +/** + * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties + * @dev: DRM device + * + * Called by a driver the first time a DVI-I connector is made. + */ +int drm_mode_create_dvi_i_properties(struct drm_device *dev) +{ + struct drm_property *dvi_i_selector; + struct drm_property *dvi_i_subconnector; + + if (dev->mode_config.dvi_i_select_subconnector_property) + return 0; + + dvi_i_selector = + drm_property_create_enum(dev, 0, + "select subconnector", + drm_dvi_i_select_enum_list, + ARRAY_SIZE(drm_dvi_i_select_enum_list)); + dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; + + dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, + "subconnector", + drm_dvi_i_subconnector_enum_list, + ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); + dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); + +/** + * DOC: HDMI connector properties + * + * content type (HDMI specific): + * Indicates content type setting to be used in HDMI infoframes to indicate + * content type for the external device, so that it adjusts its display + * settings accordingly. + * + * The value of this property can be one of the following: + * + * No Data: + * Content type is unknown + * Graphics: + * Content type is graphics + * Photo: + * Content type is photo + * Cinema: + * Content type is cinema + * Game: + * Content type is game + * + * Drivers can set up this property by calling + * drm_connector_attach_content_type_property(). Decoding to + * infoframe values is done through drm_hdmi_avi_infoframe_content_type(). + */ + +/** + * drm_connector_attach_content_type_property - attach content-type property + * @connector: connector to attach content type property on. + * + * Called by a driver the first time a HDMI connector is made. 
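+ *
+ * A sketch of typical use: attach the property once at connector init, and
+ * when enabling the encoder feed the current &drm_connector_state into the
+ * AVI infoframe (frame here being a &struct hdmi_avi_infoframe)::
+ *
+ *     drm_connector_attach_content_type_property(connector);
+ *
+ *     drm_hdmi_avi_infoframe_content_type(&frame, conn_state);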
+ */ +int drm_connector_attach_content_type_property(struct drm_connector *connector) +{ + if (!drm_mode_create_content_type_property(connector->dev)) + drm_object_attach_property(&connector->base, + connector->dev->mode_config.content_type_property, + DRM_MODE_CONTENT_TYPE_NO_DATA); + return 0; +} +EXPORT_SYMBOL(drm_connector_attach_content_type_property); + +/** + * drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe + * content type information, based + * on the corresponding DRM property. + * @frame: HDMI AVI infoframe + * @conn_state: DRM display connector state + */ +void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state) +{ + switch (conn_state->content_type) { + case DRM_MODE_CONTENT_TYPE_GRAPHICS: + frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS; + break; + case DRM_MODE_CONTENT_TYPE_CINEMA: + frame->content_type = HDMI_CONTENT_TYPE_CINEMA; + break; + case DRM_MODE_CONTENT_TYPE_GAME: + frame->content_type = HDMI_CONTENT_TYPE_GAME; + break; + case DRM_MODE_CONTENT_TYPE_PHOTO: + frame->content_type = HDMI_CONTENT_TYPE_PHOTO; + break; + default: + /* Graphics is the default (0) */ + frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS; + } + + frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA; +} +EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type); + +/** + * drm_connector_attach_tv_margin_properties - attach TV connector margin properties + * @connector: DRM connector + * + * Called by a driver when it needs to attach TV margin props to a connector. + * Typically used on SDTV and HDMI connectors. + */ +void drm_connector_attach_tv_margin_properties(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + drm_object_attach_property(&connector->base, + dev->mode_config.tv_left_margin_property, + 0); + drm_object_attach_property(&connector->base, + dev->mode_config.tv_right_margin_property, + 0); + drm_object_attach_property(&connector->base, + dev->mode_config.tv_top_margin_property, + 0); + drm_object_attach_property(&connector->base, + dev->mode_config.tv_bottom_margin_property, + 0); +} +EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties); + +/** + * drm_mode_create_tv_margin_properties - create TV connector margin properties + * @dev: DRM device + * + * Called by a driver's HDMI connector initialization routine, this function + * creates the TV margin properties for a given device. No need to call this + * function for an SDTV connector; it's already called from + * drm_mode_create_tv_properties().
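+ *
+ * A typical HDMI connector init therefore pairs it with the attach helper
+ * above, roughly::
+ *
+ *     drm_mode_create_tv_margin_properties(dev);
+ *     drm_connector_attach_tv_margin_properties(connector);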
+ */ +int drm_mode_create_tv_margin_properties(struct drm_device *dev) +{ + if (dev->mode_config.tv_left_margin_property) + return 0; + + dev->mode_config.tv_left_margin_property = + drm_property_create_range(dev, 0, "left margin", 0, 100); + if (!dev->mode_config.tv_left_margin_property) + return -ENOMEM; + + dev->mode_config.tv_right_margin_property = + drm_property_create_range(dev, 0, "right margin", 0, 100); + if (!dev->mode_config.tv_right_margin_property) + return -ENOMEM; + + dev->mode_config.tv_top_margin_property = + drm_property_create_range(dev, 0, "top margin", 0, 100); + if (!dev->mode_config.tv_top_margin_property) + return -ENOMEM; + + dev->mode_config.tv_bottom_margin_property = + drm_property_create_range(dev, 0, "bottom margin", 0, 100); + if (!dev->mode_config.tv_bottom_margin_property) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_tv_margin_properties); + +/** + * drm_mode_create_tv_properties - create TV specific connector properties + * @dev: DRM device + * @num_modes: number of different TV formats (modes) supported + * @modes: array of pointers to strings containing name of each format + * + * Called by a driver's TV initialization routine, this function creates + * the TV specific connector properties for a given device. Caller is + * responsible for allocating a list of format names and passing them to + * this routine. + */ +int drm_mode_create_tv_properties(struct drm_device *dev, + unsigned int num_modes, + const char * const modes[]) +{ + struct drm_property *tv_selector; + struct drm_property *tv_subconnector; + unsigned int i; + + if (dev->mode_config.tv_select_subconnector_property) + return 0; + + /* + * Basic connector properties + */ + tv_selector = drm_property_create_enum(dev, 0, + "select subconnector", + drm_tv_select_enum_list, + ARRAY_SIZE(drm_tv_select_enum_list)); + if (!tv_selector) + goto nomem; + + dev->mode_config.tv_select_subconnector_property = tv_selector; + + tv_subconnector = + drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, + "subconnector", + drm_tv_subconnector_enum_list, + ARRAY_SIZE(drm_tv_subconnector_enum_list)); + if (!tv_subconnector) + goto nomem; + dev->mode_config.tv_subconnector_property = tv_subconnector; + + /* + * Other, TV specific properties: margins & TV modes. 
+ */
+    if (drm_mode_create_tv_margin_properties(dev))
+        goto nomem;
+
+    dev->mode_config.tv_mode_property =
+        drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                    "mode", num_modes);
+    if (!dev->mode_config.tv_mode_property)
+        goto nomem;
+
+    for (i = 0; i < num_modes; i++)
+        drm_property_add_enum(dev->mode_config.tv_mode_property,
+                      i, modes[i]);
+
+    dev->mode_config.tv_brightness_property =
+        drm_property_create_range(dev, 0, "brightness", 0, 100);
+    if (!dev->mode_config.tv_brightness_property)
+        goto nomem;
+
+    dev->mode_config.tv_contrast_property =
+        drm_property_create_range(dev, 0, "contrast", 0, 100);
+    if (!dev->mode_config.tv_contrast_property)
+        goto nomem;
+
+    dev->mode_config.tv_flicker_reduction_property =
+        drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+    if (!dev->mode_config.tv_flicker_reduction_property)
+        goto nomem;
+
+    dev->mode_config.tv_overscan_property =
+        drm_property_create_range(dev, 0, "overscan", 0, 100);
+    if (!dev->mode_config.tv_overscan_property)
+        goto nomem;
+
+    dev->mode_config.tv_saturation_property =
+        drm_property_create_range(dev, 0, "saturation", 0, 100);
+    if (!dev->mode_config.tv_saturation_property)
+        goto nomem;
+
+    dev->mode_config.tv_hue_property =
+        drm_property_create_range(dev, 0, "hue", 0, 100);
+    if (!dev->mode_config.tv_hue_property)
+        goto nomem;
+
+    return 0;
+nomem:
+    return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ *
+ * Atomic drivers should use drm_connector_attach_scaling_mode_property()
+ * instead to correctly assign &drm_connector_state.scaling_mode
+ * in the atomic state.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+    struct drm_property *scaling_mode;
+
+    if (dev->mode_config.scaling_mode_property)
+        return 0;
+
+    scaling_mode =
+        drm_property_create_enum(dev, 0, "scaling mode",
+                     drm_scaling_mode_enum_list,
+                     ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+    dev->mode_config.scaling_mode_property = scaling_mode;
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * DOC: Variable refresh properties
+ *
+ * Variable refresh rate capable displays can dynamically adjust their
+ * refresh rate by extending the duration of their vertical front porch
+ * until page flip or timeout occurs. This can reduce or remove stuttering
+ * and latency in scenarios where the page flip does not align with the
+ * vblank interval.
+ *
+ * An example scenario would be an application flipping at a constant rate
+ * of 48Hz on a 60Hz display. The page flip will frequently miss the vblank
+ * interval and the same contents will be displayed twice. This can be
+ * observed as stuttering for content with motion.
+ *
+ * If variable refresh rate was active on a display that supported a
+ * variable refresh range from 35Hz to 60Hz, no stuttering would be
+ * observable for the example scenario. The minimum supported variable
+ * refresh rate of 35Hz is below the page flip frequency and the vertical
+ * front porch can be extended until the page flip occurs. The vblank
+ * interval will be directly aligned to the page flip rate.
+ *
+ * Not all userspace content is suitable for use with variable refresh rate.
+ * Large and frequent changes in vertical front porch duration may worsen
+ * perceived stuttering for input sensitive applications.
+ *
+ * Panel brightness will also vary with vertical front porch duration. Some
+ * panels may have noticeable differences in brightness between the minimum
+ * vertical front porch duration and the maximum vertical front porch duration.
+ * Large and frequent changes in vertical front porch duration may produce
+ * observable flickering for such panels.
+ *
+ * Userspace control for variable refresh rate is supported via properties
+ * on the &drm_connector and &drm_crtc objects.
+ *
+ * "vrr_capable":
+ *    Optional &drm_connector boolean property that drivers should attach
+ *    with drm_connector_attach_vrr_capable_property() on connectors that
+ *    could support variable refresh rates. Drivers should update the
+ *    property value by calling drm_connector_set_vrr_capable_property().
+ *
+ *    Absence of the property should indicate absence of support.
+ *
+ * "VRR_ENABLED":
+ *    Default &drm_crtc boolean property that notifies the driver that the
+ *    content on the CRTC is suitable for variable refresh rate presentation.
+ *    The driver will take this property as a hint to enable variable
+ *    refresh rate support if the receiver supports it, i.e. if the
+ *    "vrr_capable" property is true on the &drm_connector object. The
+ *    vertical front porch duration will be extended until page-flip or
+ *    timeout when enabled.
+ *
+ *    The minimum vertical front porch duration is defined as the vertical
+ *    front porch duration for the current mode.
+ *
+ *    The maximum vertical front porch duration is greater than or equal to
+ *    the minimum vertical front porch duration. The duration is derived
+ *    from the minimum supported variable refresh rate for the connector.
+ *
+ *    The driver may place further restrictions within these minimum
+ *    and maximum bounds.
+ */
+
+/**
+ * drm_connector_attach_vrr_capable_property - creates the
+ * vrr_capable property
+ * @connector: connector to create the vrr_capable property on.
+ *
+ * This is used by atomic drivers to add support for querying
+ * variable refresh rate capability for a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_vrr_capable_property(
+    struct drm_connector *connector)
+{
+    struct drm_device *dev = connector->dev;
+    struct drm_property *prop;
+
+    if (!connector->vrr_capable_property) {
+        prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE,
+                        "vrr_capable");
+        if (!prop)
+            return -ENOMEM;
+
+        connector->vrr_capable_property = prop;
+        drm_object_attach_property(&connector->base, prop, 0);
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
+
+/**
+ * drm_connector_attach_scaling_mode_property - attach atomic scaling mode property
+ * @connector: connector to attach scaling mode property on.
+ * @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
+ *
+ * This is used to add support for scaling mode to atomic drivers.
+ * The scaling mode will be set to &drm_connector_state.scaling_mode
+ * and can be used from &drm_connector_helper_funcs->atomic_check for validation.
+ *
+ * This is the atomic version of drm_mode_create_scaling_mode_property().
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
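+ *
+ * For illustration only (assumed driver code, not part of this patch), a
+ * connector supporting full-screen and centered scaling could pass::
+ *
+ *    drm_connector_attach_scaling_mode_property(connector,
+ *            BIT(DRM_MODE_SCALE_FULLSCREEN) |
+ *            BIT(DRM_MODE_SCALE_CENTER));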
+ */ +int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, + u32 scaling_mode_mask) +{ + struct drm_device *dev = connector->dev; + struct drm_property *scaling_mode_property; + int i; + const unsigned valid_scaling_mode_mask = + (1U << ARRAY_SIZE(drm_scaling_mode_enum_list)) - 1; + + if (WARN_ON(hweight32(scaling_mode_mask) < 2 || + scaling_mode_mask & ~valid_scaling_mode_mask)) + return -EINVAL; + + scaling_mode_property = + drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode", + hweight32(scaling_mode_mask)); + + if (!scaling_mode_property) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++) { + int ret; + + if (!(BIT(i) & scaling_mode_mask)) + continue; + + ret = drm_property_add_enum(scaling_mode_property, + drm_scaling_mode_enum_list[i].type, + drm_scaling_mode_enum_list[i].name); + + if (ret) { + drm_property_destroy(dev, scaling_mode_property); + + return ret; + } + } + + drm_object_attach_property(&connector->base, + scaling_mode_property, 0); + + connector->scaling_mode_property = scaling_mode_property; + + return 0; +} +EXPORT_SYMBOL(drm_connector_attach_scaling_mode_property); + +/** + * drm_mode_create_aspect_ratio_property - create aspect ratio property + * @dev: DRM device + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_create_aspect_ratio_property(struct drm_device *dev) +{ + if (dev->mode_config.aspect_ratio_property) + return 0; + + dev->mode_config.aspect_ratio_property = + drm_property_create_enum(dev, 0, "aspect ratio", + drm_aspect_ratio_enum_list, + ARRAY_SIZE(drm_aspect_ratio_enum_list)); + + if (dev->mode_config.aspect_ratio_property == NULL) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); + +/** + * DOC: standard connector properties + * + * Colorspace: + * drm_mode_create_colorspace_property - create colorspace property + * This property helps select a suitable colorspace based on the sink + * capability. Modern sink devices support wider gamut like BT2020. + * This helps switch to BT2020 mode if the BT2020 encoded video stream + * is being played by the user, same for any other colorspace. Thereby + * giving a good visual experience to users. + * + * The expectation from userspace is that it should parse the EDID + * and get supported colorspaces. Use this property and switch to the + * one supported. Sink supported colorspaces should be retrieved by + * userspace from EDID and driver will not explicitly expose them. + * + * Basically the expectation from userspace is: + * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink + * colorspace + * - Set this new property to let the sink know what it + * converted the CRTC output to. + * - This property is just to inform sink what colorspace + * source is trying to drive. + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. 
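+ *
+ * A minimal sketch of the expected driver-side sequence (illustrative only;
+ * note that the property is only created for HDMI connectors and is left
+ * NULL otherwise)::
+ *
+ *    if (drm_mode_create_colorspace_property(connector) == 0 &&
+ *        connector->colorspace_property)
+ *        drm_object_attach_property(&connector->base,
+ *                       connector->colorspace_property, 0);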
+ */
+int drm_mode_create_colorspace_property(struct drm_connector *connector)
+{
+    struct drm_device *dev = connector->dev;
+    struct drm_property *prop;
+
+    if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+        connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+        prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+                        "Colorspace",
+                        hdmi_colorspaces,
+                        ARRAY_SIZE(hdmi_colorspaces));
+        if (!prop)
+            return -ENOMEM;
+    } else {
+        DRM_DEBUG_KMS("Colorspace property not supported\n");
+        return 0;
+    }
+
+    connector->colorspace_property = prop;
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_colorspace_property);
+
+/**
+ * drm_mode_create_content_type_property - create content type property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_content_type_property(struct drm_device *dev)
+{
+    if (dev->mode_config.content_type_property)
+        return 0;
+
+    dev->mode_config.content_type_property =
+        drm_property_create_enum(dev, 0, "content type",
+                     drm_content_type_enum_list,
+                     ARRAY_SIZE(drm_content_type_enum_list));
+
+    if (dev->mode_config.content_type_property == NULL)
+        return -ENOMEM;
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_content_type_property);
+
+/**
+ * drm_mode_create_suggested_offset_properties - create suggested offset properties
+ * @dev: DRM device
+ *
+ * Create the suggested x/y offset properties for connectors.
+ */
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
+{
+    if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
+        return 0;
+
+    dev->mode_config.suggested_x_property =
+        drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
+
+    dev->mode_config.suggested_y_property =
+        drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
+
+    if (dev->mode_config.suggested_x_property == NULL ||
+        dev->mode_config.suggested_y_property == NULL)
+        return -ENOMEM;
+    return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
+
+/**
+ * drm_connector_set_path_property - set path property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property; must not be NULL.
+ *
+ * This creates a property to expose to userspace to specify a
+ * connector path. This is mainly used for DisplayPort MST where
+ * connectors have a topology and we want to allow userspace to give
+ * them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_set_path_property(struct drm_connector *connector,
+                    const char *path)
+{
+    struct drm_device *dev = connector->dev;
+    int ret;
+
+    ret = drm_property_replace_global_blob(dev,
+                           &connector->path_blob_ptr,
+                           strlen(path) + 1,
+                           path,
+                           &connector->base,
+                           dev->mode_config.path_property);
+    return ret;
+}
+EXPORT_SYMBOL(drm_connector_set_path_property);
+
+/**
+ * drm_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is of
+ * the form of 8 integers using ':' as a separator.
+ * This is used for dual port tiled displays with DisplayPort SST
+ * or DisplayPort MST connectors.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
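+ *
+ * For example, the left half of a 2x1 tiled monitor whose tiles are
+ * 1920x2160 each would be described by the blob "1:1:2:1:0:0:1920:2160"
+ * (group id, single-monitor flag, h/v tile counts, h/v tile location and
+ * h/v tile size, matching the snprintf() format used below).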
+ */
+int drm_connector_set_tile_property(struct drm_connector *connector)
+{
+    struct drm_device *dev = connector->dev;
+    char tile[256];
+    int ret;
+
+    if (!connector->has_tile) {
+        ret = drm_property_replace_global_blob(dev,
+                               &connector->tile_blob_ptr,
+                               0,
+                               NULL,
+                               &connector->base,
+                               dev->mode_config.tile_property);
+        return ret;
+    }
+
+    snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+         connector->tile_group->id, connector->tile_is_single_monitor,
+         connector->num_h_tile, connector->num_v_tile,
+         connector->tile_h_loc, connector->tile_v_loc,
+         connector->tile_h_size, connector->tile_v_size);
+
+    ret = drm_property_replace_global_blob(dev,
+                           &connector->tile_blob_ptr,
+                           strlen(tile) + 1,
+                           tile,
+                           &connector->base,
+                           dev->mode_config.tile_property);
+    return ret;
+}
+EXPORT_SYMBOL(drm_connector_set_tile_property);
+
+/**
+ * drm_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ * Since we also parse tile information from EDID's displayID block, we also
+ * set the connector's tile property here. See drm_connector_set_tile_property()
+ * for more details.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_update_edid_property(struct drm_connector *connector,
+                       const struct edid *edid)
+{
+    struct drm_device *dev = connector->dev;
+    size_t size = 0;
+    int ret;
+
+    /* ignore requests to set edid when overridden */
+    if (connector->override_edid)
+        return 0;
+
+    if (edid)
+        size = EDID_LENGTH * (1 + edid->extensions);
+
+    /* Set the display info, using edid if available, otherwise
+     * resetting the values to defaults. This duplicates the work
+     * done in drm_add_edid_modes, but that function is not
+     * consistently called before this one in all drivers and the
+     * computation is cheap enough that it seems better to
+     * duplicate it rather than attempt to ensure some arbitrary
+     * ordering of calls.
+     */
+    if (edid)
+        drm_add_display_info(connector, edid);
+    else
+        drm_reset_display_info(connector);
+
+    drm_object_property_set_value(&connector->base,
+                      dev->mode_config.non_desktop_property,
+                      connector->display_info.non_desktop);
+
+    ret = drm_property_replace_global_blob(dev,
+                           &connector->edid_blob_ptr,
+                           size,
+                           edid,
+                           &connector->base,
+                           dev->mode_config.edid_property);
+    if (ret)
+        return ret;
+    return drm_connector_set_tile_property(connector);
+}
+EXPORT_SYMBOL(drm_connector_update_edid_property);
+
+/**
+ * drm_connector_set_link_status_property - Set link status property of a connector
+ * @connector: drm connector
+ * @link_status: new value of link status property (0: Good, 1: Bad)
+ *
+ * In the usual working scenario, this link status property will always be
+ * set to "GOOD". If something fails during or after a mode set, the kernel
+ * driver may set this link status property to "BAD". The caller then needs
+ * to send a hotplug uevent for userspace to re-check the valid modes through
+ * GET_CONNECTOR_IOCTL and retry modeset.
+ *
+ * Note: Drivers cannot rely on userspace to support this property and
+ * issue a modeset. As such, they may choose to handle issues (like
+ * re-training a link) without userspace's intervention.
+ *
+ * The reason for adding this property is to handle link training failures, but
+ * it is not limited to DP or link training.
+ * For example, if we implement asynchronous setcrtc, this property can be
+ * used to report any failures in that.
+ */
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+                        uint64_t link_status)
+{
+    struct drm_device *dev = connector->dev;
+
+    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+    connector->state->link_status = link_status;
+    drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+EXPORT_SYMBOL(drm_connector_set_link_status_property);
+
+/**
+ * drm_connector_attach_max_bpc_property - attach "max bpc" property
+ * @connector: connector to attach max bpc property on.
+ * @min: The minimum bit depth supported by the connector.
+ * @max: The maximum bit depth supported by the connector.
+ *
+ * This is used to add support for limiting the bit depth on a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+                      int min, int max)
+{
+    struct drm_device *dev = connector->dev;
+    struct drm_property *prop;
+
+    prop = connector->max_bpc_property;
+    if (!prop) {
+        prop = drm_property_create_range(dev, 0, "max bpc", min, max);
+        if (!prop)
+            return -ENOMEM;
+
+        connector->max_bpc_property = prop;
+    }
+
+    drm_object_attach_property(&connector->base, prop, max);
+    connector->state->max_requested_bpc = max;
+    connector->state->max_bpc = max;
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
+
+/**
+ * drm_connector_set_vrr_capable_property - sets the variable refresh rate
+ * capable property for a connector
+ * @connector: drm connector
+ * @capable: True if the connector is variable refresh rate capable
+ *
+ * Should be used by atomic drivers to update the indicated support for
+ * variable refresh rate over a connector.
+ */
+void drm_connector_set_vrr_capable_property(
+        struct drm_connector *connector, bool capable)
+{
+    drm_object_property_set_value(&connector->base,
+                      connector->vrr_capable_property,
+                      capable);
+}
+EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
+
+/**
+ * drm_connector_init_panel_orientation_property -
+ *    initialize the connector's panel_orientation property
+ * @connector: connector for which to init the panel-orientation property.
+ * @width: width in pixels of the panel, used for panel quirk detection
+ * @height: height in pixels of the panel, used for panel quirk detection
+ *
+ * This function should only be called for built-in panels, after setting
+ * connector->display_info.panel_orientation first (if known).
+ *
+ * This function will check for platform specific (e.g. DMI based) quirks
+ * overriding display_info.panel_orientation first, then if panel_orientation
+ * is not DRM_MODE_PANEL_ORIENTATION_UNKNOWN it will attach the
+ * "panel orientation" property to the connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
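+ *
+ * Illustrative sketch of a built-in panel driver's probe path (assumed
+ * code, not part of this patch)::
+ *
+ *    connector->display_info.panel_orientation =
+ *        DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ *    drm_connector_init_panel_orientation_property(connector,
+ *            mode->hdisplay, mode->vdisplay);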
+ */ +int drm_connector_init_panel_orientation_property( + struct drm_connector *connector, int width, int height) +{ + struct drm_device *dev = connector->dev; + struct drm_display_info *info = &connector->display_info; + struct drm_property *prop; + int orientation_quirk; + + orientation_quirk = drm_get_panel_orientation_quirk(width, height); + if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) + info->panel_orientation = orientation_quirk; + + if (info->panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN) + return 0; + + prop = dev->mode_config.panel_orientation_property; + if (!prop) { + prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, + "panel orientation", + drm_panel_orientation_enum_list, + ARRAY_SIZE(drm_panel_orientation_enum_list)); + if (!prop) + return -ENOMEM; + + dev->mode_config.panel_orientation_property = prop; + } + + drm_object_attach_property(&connector->base, prop, + info->panel_orientation); + return 0; +} +EXPORT_SYMBOL(drm_connector_init_panel_orientation_property); + +int drm_connector_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value) +{ + int ret = -EINVAL; + struct drm_connector *connector = obj_to_connector(obj); + + /* Do DPMS ourselves */ + if (property == connector->dev->mode_config.dpms_property) { + ret = (*connector->funcs->dpms)(connector, (int)value); + } else if (connector->funcs->set_property) + ret = connector->funcs->set_property(connector, property, value); + + if (!ret) + drm_object_property_set_value(&connector->base, property, value); + return ret; +} + +int drm_connector_property_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_connector_set_property *conn_set_prop = data; + struct drm_mode_obj_set_property obj_set_prop = { + .value = conn_set_prop->value, + .prop_id = conn_set_prop->prop_id, + .obj_id = conn_set_prop->connector_id, + .obj_type = DRM_MODE_OBJECT_CONNECTOR + }; + + /* It does all the locking and checking we need */ + return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv); +} + +static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector) +{ + /* For atomic drivers only state objects are synchronously updated and + * protected by modeset locks, so check those first. */ + if (connector->state) + return connector->state->best_encoder; + return connector->encoder; +} + +static bool +drm_mode_expose_to_userspace(const struct drm_display_mode *mode, + const struct list_head *export_list, + const struct drm_file *file_priv) +{ + /* + * If user-space hasn't configured the driver to expose the stereo 3D + * modes, don't expose them. + */ + if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode)) + return false; + /* + * If user-space hasn't configured the driver to expose the modes + * with aspect-ratio, don't expose them. However if such a mode + * is unique, let it be exposed, but reset the aspect-ratio flags + * while preparing the list of user-modes. 
+ */ + if (!file_priv->aspect_ratio_allowed) { + struct drm_display_mode *mode_itr; + + list_for_each_entry(mode_itr, export_list, export_head) + if (drm_mode_match(mode_itr, mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + return false; + } + + return true; +} + +int drm_mode_getconnector(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_get_connector *out_resp = data; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_display_mode *mode; + int mode_count = 0; + int encoders_count = 0; + int ret = 0; + int copied = 0; + int i; + struct drm_mode_modeinfo u_mode; + struct drm_mode_modeinfo __user *mode_ptr; + uint32_t __user *encoder_ptr; + LIST_HEAD(export_list); + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); + + connector = drm_connector_lookup(dev, file_priv, out_resp->connector_id); + if (!connector) + return -ENOENT; + + drm_connector_for_each_possible_encoder(connector, encoder, i) + encoders_count++; + + if ((out_resp->count_encoders >= encoders_count) && encoders_count) { + copied = 0; + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); + + drm_connector_for_each_possible_encoder(connector, encoder, i) { + if (put_user(encoder->base.id, encoder_ptr + copied)) { + ret = -EFAULT; + goto out; + } + copied++; + } + } + out_resp->count_encoders = encoders_count; + + out_resp->connector_id = connector->base.id; + out_resp->connector_type = connector->connector_type; + out_resp->connector_type_id = connector->connector_type_id; + + mutex_lock(&dev->mode_config.mutex); + if (out_resp->count_modes == 0) { + connector->funcs->fill_modes(connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + } + + out_resp->mm_width = connector->display_info.width_mm; + out_resp->mm_height = connector->display_info.height_mm; + out_resp->subpixel = connector->display_info.subpixel_order; + out_resp->connection = connector->status; + + /* delayed so we get modes regardless of pre-fill_modes state */ + list_for_each_entry(mode, &connector->modes, head) + if (drm_mode_expose_to_userspace(mode, &export_list, + file_priv)) { + list_add_tail(&mode->export_head, &export_list); + mode_count++; + } + + /* + * This ioctl is called twice, once to determine how much space is + * needed, and the 2nd time to fill it. + * The modes that need to be exposed to the user are maintained in the + * 'export_list'. When the ioctl is called first time to determine the, + * space, the export_list gets filled, to find the no.of modes. In the + * 2nd time, the user modes are filled, one by one from the export_list. + */ + if ((out_resp->count_modes >= mode_count) && mode_count) { + copied = 0; + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; + list_for_each_entry(mode, &export_list, export_head) { + drm_mode_convert_to_umode(&u_mode, mode); + /* + * Reset aspect ratio flags of user-mode, if modes with + * aspect-ratio are not supported. 
+ */ + if (!file_priv->aspect_ratio_allowed) + u_mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK; + if (copy_to_user(mode_ptr + copied, + &u_mode, sizeof(u_mode))) { + ret = -EFAULT; + mutex_unlock(&dev->mode_config.mutex); + + goto out; + } + copied++; + } + } + out_resp->count_modes = mode_count; + mutex_unlock(&dev->mode_config.mutex); + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + encoder = drm_connector_get_encoder(connector); + if (encoder) + out_resp->encoder_id = encoder->base.id; + else + out_resp->encoder_id = 0; + + /* Only grab properties after probing, to make sure EDID and other + * properties reflect the latest status. */ + ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + (uint32_t __user *)(unsigned long)(out_resp->props_ptr), + (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), + &out_resp->count_props); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + +out: + drm_connector_put(connector); + + return ret; +} + + +/** + * DOC: Tile group + * + * Tile groups are used to represent tiled monitors with a unique integer + * identifier. Tiled monitors using DisplayID v1.3 have a unique 8-byte handle, + * we store this in a tile group, so we have a common identifier for all tiles + * in a monitor group. The property is called "TILE". Drivers can manage tile + * groups using drm_mode_create_tile_group(), drm_mode_put_tile_group() and + * drm_mode_get_tile_group(). But this is only needed for internal panels where + * the tile group information is exposed through a non-standard way. + */ + +static void drm_tile_group_free(struct kref *kref) +{ + struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount); + struct drm_device *dev = tg->dev; + mutex_lock(&dev->mode_config.idr_mutex); + idr_remove(&dev->mode_config.tile_idr, tg->id); + mutex_unlock(&dev->mode_config.idr_mutex); + kfree(tg); +} + +/** + * drm_mode_put_tile_group - drop a reference to a tile group. + * @dev: DRM device + * @tg: tile group to drop reference to. + * + * drop reference to tile group and free if 0. + */ +void drm_mode_put_tile_group(struct drm_device *dev, + struct drm_tile_group *tg) +{ + kref_put(&tg->refcount, drm_tile_group_free); +} +EXPORT_SYMBOL(drm_mode_put_tile_group); + +/** + * drm_mode_get_tile_group - get a reference to an existing tile group + * @dev: DRM device + * @topology: 8-bytes unique per monitor. + * + * Use the unique bytes to get a reference to an existing tile group. + * + * RETURNS: + * tile group or NULL if not found. + */ +struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, + char topology[8]) +{ + struct drm_tile_group *tg; + int id; + mutex_lock(&dev->mode_config.idr_mutex); + idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) { + if (!memcmp(tg->group_data, topology, 8)) { + if (!kref_get_unless_zero(&tg->refcount)) + tg = NULL; + mutex_unlock(&dev->mode_config.idr_mutex); + return tg; + } + } + mutex_unlock(&dev->mode_config.idr_mutex); + return NULL; +} +EXPORT_SYMBOL(drm_mode_get_tile_group); + +/** + * drm_mode_create_tile_group - create a tile group from a displayid description + * @dev: DRM device + * @topology: 8-bytes unique per monitor. + * + * Create a tile group for the unique monitor, and get a unique + * identifier for the tile group. + * + * RETURNS: + * new tile group or NULL. 
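+ *
+ * Callers typically pair this with drm_mode_get_tile_group() so that all
+ * tiles sharing a topology end up in one group (illustrative only)::
+ *
+ *    tg = drm_mode_get_tile_group(dev, topology);
+ *    if (!tg)
+ *        tg = drm_mode_create_tile_group(dev, topology);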
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+                          char topology[8])
+{
+    struct drm_tile_group *tg;
+    int ret;
+
+    tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+    if (!tg)
+        return NULL;
+
+    kref_init(&tg->refcount);
+    memcpy(tg->group_data, topology, 8);
+    tg->dev = dev;
+
+    mutex_lock(&dev->mode_config.idr_mutex);
+    ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+    if (ret >= 0) {
+        tg->id = ret;
+    } else {
+        kfree(tg);
+        tg = NULL;
+    }
+
+    mutex_unlock(&dev->mode_config.idr_mutex);
+    return tg;
+}
+EXPORT_SYMBOL(drm_mode_create_tile_group);
Index: sys/dev/drm/core/drm_crtc.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_crtc.c
@@ -0,0 +1,750 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard
+ *    Eric Anholt
+ *    Dave Airlie
+ *    Jesse Barnes
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * A CRTC represents the overall display pipeline. It receives pixel data from
+ * &drm_plane and blends them together. The &drm_display_mode is also attached
+ * to the CRTC, specifying display timings. On the output side the data is fed
+ * to one or more &drm_encoder, which are then each connected to one
+ * &drm_connector.
+ *
+ * To create a CRTC, a KMS driver allocates and zeroes an instance of
+ * &struct drm_crtc (possibly as part of a larger structure) and registers it
+ * with a call to drm_crtc_init_with_planes().
+ *
+ * The CRTC is also the entry point for legacy modeset operations, see
+ * &drm_crtc_funcs.set_config, legacy plane operations, see
+ * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
+ * operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
+ * features are controlled through &drm_property and
+ * &drm_mode_config_funcs.atomic_check and &drm_mode_config_funcs.atomic_commit.
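+ *
+ * A bare-bones registration sketch, assuming a hypothetical foo_crtc wrapper
+ * and already-initialised primary and cursor planes (illustrative only, not
+ * part of this patch)::
+ *
+ *    struct foo_crtc {
+ *        struct drm_crtc base;
+ *    };
+ *
+ *    ret = drm_crtc_init_with_planes(dev, &foo->base, primary, cursor,
+ *                    &foo_crtc_funcs, NULL);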
+ */
+
+/**
+ * drm_crtc_from_index - find the registered CRTC at an index
+ * @dev: DRM device
+ * @idx: index of registered CRTC to find
+ *
+ * Given a CRTC index, return the registered CRTC from DRM device's
+ * list of CRTCs with matching index. This is the inverse of drm_crtc_index().
+ * It's useful in the vblank callbacks (like &drm_driver.enable_vblank or
+ * &drm_driver.disable_vblank), since that still deals with indices instead
+ * of pointers to &struct drm_crtc.
+ */
+struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx)
+{
+    struct drm_crtc *crtc;
+
+    drm_for_each_crtc(crtc, dev)
+        if (idx == crtc->index)
+            return crtc;
+
+    return NULL;
+}
+EXPORT_SYMBOL(drm_crtc_from_index);
+
+int drm_crtc_force_disable(struct drm_crtc *crtc)
+{
+    struct drm_mode_set set = {
+        .crtc = crtc,
+    };
+
+    WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev));
+
+    return drm_mode_set_config_internal(&set);
+}
+
+static unsigned int drm_num_crtcs(struct drm_device *dev)
+{
+    unsigned int num = 0;
+    struct drm_crtc *tmp;
+
+    drm_for_each_crtc(tmp, dev) {
+        num++;
+    }
+
+    return num;
+}
+
+int drm_crtc_register_all(struct drm_device *dev)
+{
+    struct drm_crtc *crtc;
+    int ret = 0;
+
+    drm_for_each_crtc(crtc, dev) {
+        drm_debugfs_crtc_add(crtc);
+
+        if (crtc->funcs->late_register)
+            ret = crtc->funcs->late_register(crtc);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
+void drm_crtc_unregister_all(struct drm_device *dev)
+{
+    struct drm_crtc *crtc;
+
+    drm_for_each_crtc(crtc, dev) {
+        if (crtc->funcs->early_unregister)
+            crtc->funcs->early_unregister(crtc);
+        drm_debugfs_crtc_remove(crtc);
+    }
+}
+
+static int drm_crtc_crc_init(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+    spin_lock_init(&crtc->crc.lock);
+    init_waitqueue_head(&crtc->crc.wq);
+    crtc->crc.source = kstrdup("auto", GFP_KERNEL);
+    if (!crtc->crc.source)
+        return -ENOMEM;
+#endif
+    return 0;
+}
+
+static void drm_crtc_crc_fini(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+    kfree(crtc->crc.source);
+#endif
+}
+
+static const struct dma_fence_ops drm_crtc_fence_ops;
+
+static struct drm_crtc *fence_to_crtc(struct dma_fence *fence)
+{
+    BUG_ON(fence->ops != &drm_crtc_fence_ops);
+    return container_of(fence->lock, struct drm_crtc, fence_lock);
+}
+
+static const char *drm_crtc_fence_get_driver_name(struct dma_fence *fence)
+{
+    struct drm_crtc *crtc = fence_to_crtc(fence);
+
+    return crtc->dev->driver->name;
+}
+
+static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
+{
+    struct drm_crtc *crtc = fence_to_crtc(fence);
+
+    return crtc->timeline_name;
+}
+
+static const struct dma_fence_ops drm_crtc_fence_ops = {
+    .get_driver_name = drm_crtc_fence_get_driver_name,
+    .get_timeline_name = drm_crtc_fence_get_timeline_name,
+};
+
+struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
+{
+    struct dma_fence *fence;
+
+    fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+    if (!fence)
+        return NULL;
+
+    dma_fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
+               crtc->fence_context, ++crtc->fence_seqno);
+
+    return fence;
+}
+
+/**
+ * drm_crtc_init_with_planes - Initialise a new CRTC object with
+ *    specified primary and cursor planes.
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Inits a new object created as base part of a driver crtc object.
+ * Drivers should use this function instead of drm_crtc_init(), which is only
+ * provided for backwards compatibility with drivers which do not yet support
+ * universal planes. For really simple hardware which has only 1 plane look at
+ * drm_simple_display_pipe_init() instead.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+                  struct drm_plane *primary,
+                  struct drm_plane *cursor,
+                  const struct drm_crtc_funcs *funcs,
+                  const char *name, ...)
+{
+    struct drm_mode_config *config = &dev->mode_config;
+    int ret;
+
+    WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
+    WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);
+
+    /* crtc index is used with 32bit bitmasks */
+    if (WARN_ON(config->num_crtc >= 32))
+        return -EINVAL;
+
+    WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+        (!funcs->atomic_destroy_state ||
+         !funcs->atomic_duplicate_state));
+
+    crtc->dev = dev;
+    crtc->funcs = funcs;
+
+    INIT_LIST_HEAD(&crtc->commit_list);
+    spin_lock_init(&crtc->commit_lock);
+
+    drm_modeset_lock_init(&crtc->mutex);
+    ret = drm_mode_object_add(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+    if (ret)
+        return ret;
+
+    if (name) {
+        va_list ap;
+
+        va_start(ap, name);
+        crtc->name = kvasprintf(GFP_KERNEL, name, ap);
+        va_end(ap);
+    } else {
+        crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
+                       drm_num_crtcs(dev));
+    }
+    if (!crtc->name) {
+        drm_mode_object_unregister(dev, &crtc->base);
+        return -ENOMEM;
+    }
+
+    crtc->fence_context = dma_fence_context_alloc(1);
+    spin_lock_init(&crtc->fence_lock);
+    snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
+         "CRTC:%d-%s", crtc->base.id, crtc->name);
+
+    crtc->base.properties = &crtc->properties;
+
+    list_add_tail(&crtc->head, &config->crtc_list);
+    crtc->index = config->num_crtc++;
+
+    crtc->primary = primary;
+    crtc->cursor = cursor;
+    if (primary && !primary->possible_crtcs)
+        primary->possible_crtcs = drm_crtc_mask(crtc);
+    if (cursor && !cursor->possible_crtcs)
+        cursor->possible_crtcs = drm_crtc_mask(crtc);
+
+    ret = drm_crtc_crc_init(crtc);
+    if (ret) {
+        drm_mode_object_unregister(dev, &crtc->base);
+        return ret;
+    }
+
+    if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+        drm_object_attach_property(&crtc->base, config->prop_active, 0);
+        drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
+        drm_object_attach_property(&crtc->base,
+                       config->prop_out_fence_ptr, 0);
+        drm_object_attach_property(&crtc->base,
+                       config->prop_vrr_enabled, 0);
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_crtc_init_with_planes);
+
+/**
+ * drm_crtc_cleanup - Clean up the core crtc usage
+ * @crtc: CRTC to cleanup
+ *
+ * This function cleans up @crtc and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the crtc structure itself,
+ * this is the responsibility of the caller.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+    struct drm_device *dev = crtc->dev;
+
+    /* Note that the crtc_list is considered to be static; should we
+     * remove the drm_crtc at runtime we would have to decrement all
+     * the indices on the drm_crtc after us in the crtc_list.
+ */ + + drm_crtc_crc_fini(crtc); + + kfree(crtc->gamma_store); + crtc->gamma_store = NULL; + + drm_modeset_lock_fini(&crtc->mutex); + + drm_mode_object_unregister(dev, &crtc->base); + list_del(&crtc->head); + dev->mode_config.num_crtc--; + + WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state); + if (crtc->state && crtc->funcs->atomic_destroy_state) + crtc->funcs->atomic_destroy_state(crtc, crtc->state); + + kfree(crtc->name); + + memset(crtc, 0, sizeof(*crtc)); +} +EXPORT_SYMBOL(drm_crtc_cleanup); + +/** + * drm_mode_getcrtc - get CRTC configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Construct a CRTC configuration structure to return to the user. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_getcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_crtc *crtc_resp = data; + struct drm_crtc *crtc; + struct drm_plane *plane; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + crtc = drm_crtc_find(dev, file_priv, crtc_resp->crtc_id); + if (!crtc) + return -ENOENT; + + plane = crtc->primary; + + crtc_resp->gamma_size = crtc->gamma_size; + + drm_modeset_lock(&plane->mutex, NULL); + if (plane->state && plane->state->fb) + crtc_resp->fb_id = plane->state->fb->base.id; + else if (!plane->state && plane->fb) + crtc_resp->fb_id = plane->fb->base.id; + else + crtc_resp->fb_id = 0; + + if (plane->state) { + crtc_resp->x = plane->state->src_x >> 16; + crtc_resp->y = plane->state->src_y >> 16; + } + drm_modeset_unlock(&plane->mutex); + + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state) { + if (crtc->state->enable) { + drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode); + crtc_resp->mode_valid = 1; + } else { + crtc_resp->mode_valid = 0; + } + } else { + crtc_resp->x = crtc->x; + crtc_resp->y = crtc->y; + + if (crtc->enabled) { + drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode); + crtc_resp->mode_valid = 1; + + } else { + crtc_resp->mode_valid = 0; + } + } + if (!file_priv->aspect_ratio_allowed) + crtc_resp->mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK; + drm_modeset_unlock(&crtc->mutex); + + return 0; +} + +static int __drm_mode_set_config_internal(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_crtc *crtc = set->crtc; + struct drm_framebuffer *fb; + struct drm_crtc *tmp; + int ret; + + WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev)); + + /* + * NOTE: ->set_config can also disable other crtcs (if we steal all + * connectors from it), hence we need to refcount the fbs across all + * crtcs. Atomic modeset will have saner semantics ... + */ + drm_for_each_crtc(tmp, crtc->dev) { + struct drm_plane *plane = tmp->primary; + + plane->old_fb = plane->fb; + } + + fb = set->fb; + + ret = crtc->funcs->set_config(set, ctx); + if (ret == 0) { + struct drm_plane *plane = crtc->primary; + + plane->crtc = fb ? crtc : NULL; + plane->fb = fb; + } + + drm_for_each_crtc(tmp, crtc->dev) { + struct drm_plane *plane = tmp->primary; + + if (plane->fb) + drm_framebuffer_get(plane->fb); + if (plane->old_fb) + drm_framebuffer_put(plane->old_fb); + plane->old_fb = NULL; + } + + return ret; +} + +/** + * drm_mode_set_config_internal - helper to call &drm_mode_config_funcs.set_config + * @set: modeset config to set + * + * This is a little helper to wrap internal calls to the + * &drm_mode_config_funcs.set_config driver interface. 
The only thing it adds is + * correct refcounting dance. + * + * This should only be used by non-atomic legacy drivers. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_set_config_internal(struct drm_mode_set *set) +{ + WARN_ON(drm_drv_uses_atomic_modeset(set->crtc->dev)); + + return __drm_mode_set_config_internal(set, NULL); +} +EXPORT_SYMBOL(drm_mode_set_config_internal); + +/** + * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the + * CRTC viewport + * @crtc: CRTC that framebuffer will be displayed on + * @x: x panning + * @y: y panning + * @mode: mode that framebuffer will be displayed under + * @fb: framebuffer to check size of + */ +int drm_crtc_check_viewport(const struct drm_crtc *crtc, + int x, int y, + const struct drm_display_mode *mode, + const struct drm_framebuffer *fb) + +{ + int hdisplay, vdisplay; + + drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay); + + if (crtc->state && + drm_rotation_90_or_270(crtc->primary->state->rotation)) + swap(hdisplay, vdisplay); + + return drm_framebuffer_check_src_coords(x << 16, y << 16, + hdisplay << 16, vdisplay << 16, + fb); +} +EXPORT_SYMBOL(drm_crtc_check_viewport); + +/** + * drm_mode_setcrtc - set CRTC configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Build a new CRTC configuration based on user request. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_setcrtc(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_mode_crtc *crtc_req = data; + struct drm_crtc *crtc; + struct drm_plane *plane; + struct drm_connector **connector_set = NULL, *connector; + struct drm_framebuffer *fb = NULL; + struct drm_display_mode *mode = NULL; + struct drm_mode_set set; + uint32_t __user *set_connectors_ptr; + struct drm_modeset_acquire_ctx ctx; + int ret; + int i; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + /* + * Universal plane src offsets are only 16.16, prevent havoc for + * drivers using universal plane code internally. + */ + if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000) + return -ERANGE; + + crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id); + if (!crtc) { + DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); + return -ENOENT; + } + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); + + plane = crtc->primary; + + /* allow disabling with the primary plane leased */ + if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id)) + return -EACCES; + + mutex_lock(&crtc->dev->mode_config.mutex); + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, + DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); + + if (crtc_req->mode_valid) { + /* If we have a mode we need a framebuffer. */ + /* If we pass -1, set the mode with the currently bound fb */ + if (crtc_req->fb_id == -1) { + struct drm_framebuffer *old_fb; + + if (plane->state) + old_fb = plane->state->fb; + else + old_fb = plane->fb; + + if (!old_fb) { + DRM_DEBUG_KMS("CRTC doesn't have current FB\n"); + ret = -EINVAL; + goto out; + } + + fb = old_fb; + /* Make refcounting symmetric with the lookup path. 
*/ + drm_framebuffer_get(fb); + } else { + fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id); + if (!fb) { + DRM_DEBUG_KMS("Unknown FB ID%d\n", + crtc_req->fb_id); + ret = -ENOENT; + goto out; + } + } + + mode = drm_mode_create(dev); + if (!mode) { + ret = -ENOMEM; + goto out; + } + if (!file_priv->aspect_ratio_allowed && + (crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) { + DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n"); + ret = -EINVAL; + goto out; + } + + + ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode); + if (ret) { + DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n", + ret, drm_get_mode_status_name(mode->status)); + drm_mode_debug_printmodeline(mode); + goto out; + } + + /* + * Check whether the primary plane supports the fb pixel format. + * Drivers not implementing the universal planes API use a + * default formats list provided by the DRM core which doesn't + * match real hardware capabilities. Skip the check in that + * case. + */ + if (!plane->format_default) { + ret = drm_plane_check_pixel_format(plane, + fb->format->format, + fb->modifier); + if (ret) { + struct drm_format_name_buf format_name; + DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n", + drm_get_format_name(fb->format->format, + &format_name), + fb->modifier); + goto out; + } + } + + ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y, + mode, fb); + if (ret) + goto out; + + } + + if (crtc_req->count_connectors == 0 && mode) { + DRM_DEBUG_KMS("Count connectors is 0 but mode set\n"); + ret = -EINVAL; + goto out; + } + + if (crtc_req->count_connectors > 0 && (!mode || !fb)) { + DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n", + crtc_req->count_connectors); + ret = -EINVAL; + goto out; + } + + if (crtc_req->count_connectors > 0) { + u32 out_id; + + /* Avoid unbounded kernel memory allocation */ + if (crtc_req->count_connectors > config->num_connector) { + ret = -EINVAL; + goto out; + } + + connector_set = kmalloc_array(crtc_req->count_connectors, + sizeof(struct drm_connector *), + GFP_KERNEL); + if (!connector_set) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < crtc_req->count_connectors; i++) { + connector_set[i] = NULL; + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr; + if (get_user(out_id, &set_connectors_ptr[i])) { + ret = -EFAULT; + goto out; + } + + connector = drm_connector_lookup(dev, file_priv, out_id); + if (!connector) { + DRM_DEBUG_KMS("Connector id %d unknown\n", + out_id); + ret = -ENOENT; + goto out; + } + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + + connector_set[i] = connector; + } + } + + set.crtc = crtc; + set.x = crtc_req->x; + set.y = crtc_req->y; + set.mode = mode; + set.connectors = connector_set; + set.num_connectors = crtc_req->count_connectors; + set.fb = fb; + + if (drm_drv_uses_atomic_modeset(dev)) + ret = crtc->funcs->set_config(&set, &ctx); + else + ret = __drm_mode_set_config_internal(&set, &ctx); + +out: + if (fb) + drm_framebuffer_put(fb); + + if (connector_set) { + for (i = 0; i < crtc_req->count_connectors; i++) { + if (connector_set[i]) + drm_connector_put(connector_set[i]); + } + } + kfree(connector_set); + drm_mode_destroy(dev, mode); + + /* In case we need to retry... 
*/ + connector_set = NULL; + fb = NULL; + mode = NULL; + + DRM_MODESET_LOCK_ALL_END(ctx, ret); + mutex_unlock(&crtc->dev->mode_config.mutex); + + return ret; +} + +int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value) +{ + int ret = -EINVAL; + struct drm_crtc *crtc = obj_to_crtc(obj); + + if (crtc->funcs->set_property) + ret = crtc->funcs->set_property(crtc, property, value); + if (!ret) + drm_object_property_set_value(obj, property, value); + + return ret; +} Index: sys/dev/drm/core/drm_crtc_helper.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_crtc_helper.c @@ -0,0 +1,1000 @@ +/* + * Copyright (c) 2006-2008 Intel Corporation + * Copyright (c) 2007 Dave Airlie + * + * DRM core CRTC related functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + * + * Authors: + * Keith Packard + * Eric Anholt + * Dave Airlie + * Jesse Barnes + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * DOC: overview + * + * The CRTC modeset helper library provides a default set_config implementation + * in drm_crtc_helper_set_config(). Plus a few other convenience functions using + * the same callbacks which drivers can use to e.g. restore the modeset + * configuration on resume with drm_helper_resume_force_mode(). + * + * Note that this helper library doesn't track the current power state of CRTCs + * and encoders. It can call callbacks like &drm_encoder_helper_funcs.dpms even + * though the hardware is already in the desired state. This deficiency has been + * fixed in the atomic helpers. + * + * The driver callbacks are mostly compatible with the atomic modeset helpers, + * except for the handling of the primary plane: Atomic helpers require that the + * primary plane is implemented as a real standalone plane and not directly tied + * to the CRTC state. For easier transition this library provides functions to + * implement the old semantics required by the CRTC helpers using the new plane + * and atomic helper callbacks. + * + * Drivers are strongly urged to convert to the atomic helpers (by way of first + * converting to the plane helpers). 
New drivers must not use these functions + * but need to implement the atomic interface instead, potentially using the + * atomic helpers for that. + * + * These legacy modeset helpers use the same function table structures as + * all other modesetting helpers. See the documentation for struct + * &drm_crtc_helper_funcs, &struct drm_encoder_helper_funcs and struct + * &drm_connector_helper_funcs. + */ + +/** + * drm_helper_encoder_in_use - check if a given encoder is in use + * @encoder: encoder to check + * + * Checks whether @encoder is with the current mode setting output configuration + * in use by any connector. This doesn't mean that it is actually enabled since + * the DPMS state is tracked separately. + * + * Returns: + * True if @encoder is used, false otherwise. + */ +bool drm_helper_encoder_in_use(struct drm_encoder *encoder) +{ + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct drm_device *dev = encoder->dev; + + WARN_ON(drm_drv_uses_atomic_modeset(dev)); + + /* + * We can expect this mutex to be locked if we are not panicking. + * Locking is currently fubar in the panic handler. + */ + if (!oops_in_progress) { + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + } + + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->encoder == encoder) { + drm_connector_list_iter_end(&conn_iter); + return true; + } + } + drm_connector_list_iter_end(&conn_iter); + return false; +} +EXPORT_SYMBOL(drm_helper_encoder_in_use); + +/** + * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config + * @crtc: CRTC to check + * + * Checks whether @crtc is with the current mode setting output configuration + * in use by any connector. This doesn't mean that it is actually enabled since + * the DPMS state is tracked separately. + * + * Returns: + * True if @crtc is used, false otherwise. + */ +bool drm_helper_crtc_in_use(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + struct drm_device *dev = crtc->dev; + + WARN_ON(drm_drv_uses_atomic_modeset(dev)); + + /* + * We can expect this mutex to be locked if we are not panicking. + * Locking is currently fubar in the panic handler. 
+ */ + if (!oops_in_progress) + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + + drm_for_each_encoder(encoder, dev) + if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) + return true; + return false; +} +EXPORT_SYMBOL(drm_helper_crtc_in_use); + +static void +drm_encoder_disable(struct drm_encoder *encoder) +{ + const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + + if (!encoder_funcs) + return; + + if (encoder_funcs->disable) + (*encoder_funcs->disable)(encoder); + else if (encoder_funcs->dpms) + (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); +} + +static void __drm_helper_disable_unused_functions(struct drm_device *dev) +{ + struct drm_encoder *encoder; + struct drm_crtc *crtc; + + drm_warn_on_modeset_not_all_locked(dev); + + drm_for_each_encoder(encoder, dev) { + if (!drm_helper_encoder_in_use(encoder)) { + drm_encoder_disable(encoder); + /* disconnect encoder from any connector */ + encoder->crtc = NULL; + } + } + + drm_for_each_crtc(crtc, dev) { + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + crtc->enabled = drm_helper_crtc_in_use(crtc); + if (!crtc->enabled) { + if (crtc_funcs->disable) + (*crtc_funcs->disable)(crtc); + else + (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); + crtc->primary->fb = NULL; + } + } +} + +/** + * drm_helper_disable_unused_functions - disable unused objects + * @dev: DRM device + * + * This function walks through the entire mode setting configuration of @dev. It + * will remove any CRTC links of unused encoders and encoder links of + * disconnected connectors. Then it will disable all unused encoders and CRTCs + * either by calling their disable callback if available or by calling their + * dpms callback with DRM_MODE_DPMS_OFF. + * + * NOTE: + * + * This function is part of the legacy modeset helper library and will cause + * major confusion with atomic drivers. This is because atomic helpers guarantee + * to never call ->disable() hooks on a disabled function, or ->enable() hooks + * on an enabled functions. drm_helper_disable_unused_functions() on the other + * hand throws such guarantees into the wind and calls disable hooks + * unconditionally on unused functions. + */ +void drm_helper_disable_unused_functions(struct drm_device *dev) +{ + WARN_ON(drm_drv_uses_atomic_modeset(dev)); + + drm_modeset_lock_all(dev); + __drm_helper_disable_unused_functions(dev); + drm_modeset_unlock_all(dev); +} +EXPORT_SYMBOL(drm_helper_disable_unused_functions); + +/* + * Check the CRTC we're going to map each output to vs. its current + * CRTC. If they don't match, we have to disable the output and the CRTC + * since the driver will have to re-route things. + */ +static void +drm_crtc_prepare_encoders(struct drm_device *dev) +{ + const struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_encoder *encoder; + + drm_for_each_encoder(encoder, dev) { + encoder_funcs = encoder->helper_private; + if (!encoder_funcs) + continue; + + /* Disable unused encoders */ + if (encoder->crtc == NULL) + drm_encoder_disable(encoder); + /* Disable encoders whose CRTC is about to change */ + if (encoder_funcs->get_crtc && + encoder->crtc != (*encoder_funcs->get_crtc)(encoder)) + drm_encoder_disable(encoder); + } +} + +/** + * drm_crtc_helper_set_mode - internal helper to set a mode + * @crtc: CRTC to program + * @mode: mode to use + * @x: horizontal offset into the surface + * @y: vertical offset into the surface + * @old_fb: old framebuffer, for cleanup + * + * Try to set @mode on @crtc. 
Give @crtc and its associated connectors a chance + * to fixup or reject the mode prior to trying to set it. This is an internal + * helper that drivers could e.g. use to update properties that require the + * entire output pipe to be disabled and re-enabled in a new configuration. For + * example for changing whether audio is enabled on a hdmi link or for changing + * panel fitter or dither attributes. It is also called by the + * drm_crtc_helper_set_config() helper function to drive the mode setting + * sequence. + * + * Returns: + * True if the mode was set successfully, false otherwise. + */ +bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + const struct drm_encoder_helper_funcs *encoder_funcs; + int saved_x, saved_y; + bool saved_enabled; + struct drm_encoder *encoder; + bool ret = true; + + WARN_ON(drm_drv_uses_atomic_modeset(dev)); + + drm_warn_on_modeset_not_all_locked(dev); + + saved_enabled = crtc->enabled; + crtc->enabled = drm_helper_crtc_in_use(crtc); + if (!crtc->enabled) + return true; + + adjusted_mode = drm_mode_duplicate(dev, mode); + if (!adjusted_mode) { + crtc->enabled = saved_enabled; + return false; + } + + saved_mode = crtc->mode; + saved_hwmode = crtc->hwmode; + saved_x = crtc->x; + saved_y = crtc->y; + + /* Update crtc values up front so the driver can rely on them for mode + * setting. + */ + crtc->mode = *mode; + crtc->x = x; + crtc->y = y; + + /* Pass our mode to the connectors and the CRTC to give them a chance to + * adjust it according to limitations or connector properties, and also + * a chance to reject the mode entirely. + */ + drm_for_each_encoder(encoder, dev) { + + if (encoder->crtc != crtc) + continue; + + encoder_funcs = encoder->helper_private; + if (!encoder_funcs) + continue; + + encoder_funcs = encoder->helper_private; + if (encoder_funcs->mode_fixup) { + if (!(ret = encoder_funcs->mode_fixup(encoder, mode, + adjusted_mode))) { + DRM_DEBUG_KMS("Encoder fixup failed\n"); + goto done; + } + } + } + + if (crtc_funcs->mode_fixup) { + if (!(ret = crtc_funcs->mode_fixup(crtc, mode, + adjusted_mode))) { + DRM_DEBUG_KMS("CRTC fixup failed\n"); + goto done; + } + } + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); + + crtc->hwmode = *adjusted_mode; + + /* Prepare the encoders and CRTCs before setting the mode. */ + drm_for_each_encoder(encoder, dev) { + + if (encoder->crtc != crtc) + continue; + + encoder_funcs = encoder->helper_private; + if (!encoder_funcs) + continue; + + /* Disable the encoders as the first thing we do. */ + if (encoder_funcs->prepare) + encoder_funcs->prepare(encoder); + } + + drm_crtc_prepare_encoders(dev); + + crtc_funcs->prepare(crtc); + + /* Set up the DPLL and any encoders state that needs to adjust or depend + * on the DPLL. + */ + ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); + if (!ret) + goto done; + + drm_for_each_encoder(encoder, dev) { + + if (encoder->crtc != crtc) + continue; + + encoder_funcs = encoder->helper_private; + if (!encoder_funcs) + continue; + + DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n", + encoder->base.id, encoder->name, mode->name); + if (encoder_funcs->mode_set) + encoder_funcs->mode_set(encoder, mode, adjusted_mode); + } + + /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ + crtc_funcs->commit(crtc); + + drm_for_each_encoder(encoder, dev) { + + if (encoder->crtc != crtc) + continue; + + encoder_funcs = encoder->helper_private; + if (!encoder_funcs) + continue; + + if (encoder_funcs->commit) + encoder_funcs->commit(encoder); + } + + /* Calculate and store various constants which + * are later needed by vblank and swap-completion + * timestamping. They are derived from true hwmode. + */ + drm_calc_timestamping_constants(crtc, &crtc->hwmode); + + /* FIXME: add subpixel order */ +done: + drm_mode_destroy(dev, adjusted_mode); + if (!ret) { + crtc->enabled = saved_enabled; + crtc->mode = saved_mode; + crtc->hwmode = saved_hwmode; + crtc->x = saved_x; + crtc->y = saved_y; + } + + return ret; +} +EXPORT_SYMBOL(drm_crtc_helper_set_mode); + +static void +drm_crtc_helper_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_connector *connector; + struct drm_encoder *encoder; + + /* Decouple all encoders and their attached connectors from this crtc */ + drm_for_each_encoder(encoder, dev) { + struct drm_connector_list_iter conn_iter; + + if (encoder->crtc != crtc) + continue; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->encoder != encoder) + continue; + + connector->encoder = NULL; + + /* + * drm_helper_disable_unused_functions() ought to be + * doing this, but since we've decoupled the encoder + * from the connector above, the required connection + * between them is henceforth no longer available. + */ + connector->dpms = DRM_MODE_DPMS_OFF; + + /* we keep a reference while the encoder is bound */ + drm_connector_put(connector); + } + drm_connector_list_iter_end(&conn_iter); + } + + __drm_helper_disable_unused_functions(dev); +} + +/** + * drm_crtc_helper_set_config - set a new config from userspace + * @set: mode set configuration + * @ctx: lock acquire context, not used here + * + * The drm_crtc_helper_set_config() helper function implements the of + * &drm_crtc_funcs.set_config callback for drivers using the legacy CRTC + * helpers. + * + * It first tries to locate the best encoder for each connector by calling the + * connector @drm_connector_helper_funcs.best_encoder helper operation. + * + * After locating the appropriate encoders, the helper function will call the + * mode_fixup encoder and CRTC helper operations to adjust the requested mode, + * or reject it completely in which case an error will be returned to the + * application. If the new configuration after mode adjustment is identical to + * the current configuration the helper function will return without performing + * any other operation. + * + * If the adjusted mode is identical to the current mode but changes to the + * frame buffer need to be applied, the drm_crtc_helper_set_config() function + * will call the CRTC &drm_crtc_helper_funcs.mode_set_base helper operation. + * + * If the adjusted mode differs from the current mode, or if the + * ->mode_set_base() helper operation is not provided, the helper function + * performs a full mode set sequence by calling the ->prepare(), ->mode_set() + * and ->commit() CRTC and encoder helper operations, in that order. + * Alternatively it can also use the dpms and disable helper operations. For + * details see &struct drm_crtc_helper_funcs and struct + * &drm_encoder_helper_funcs. + * + * This function is deprecated. New drivers must implement atomic modeset + * support, for which this function is unsuitable. 
Instead drivers should use + * drm_atomic_helper_set_config(). + * + * Returns: + * Returns 0 on success, negative errno numbers on failure. + */ +int drm_crtc_helper_set_config(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_device *dev; + struct drm_crtc **save_encoder_crtcs, *new_crtc; + struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; + bool mode_changed = false; /* if true do a full mode set */ + bool fb_changed = false; /* if true and !mode_changed just do a flip */ + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + int count = 0, ro, fail = 0; + const struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_mode_set save_set; + int ret; + int i; + + DRM_DEBUG_KMS("\n"); + + BUG_ON(!set); + BUG_ON(!set->crtc); + BUG_ON(!set->crtc->helper_private); + + /* Enforce sane interface api - has been abused by the fb helper. */ + BUG_ON(!set->mode && set->fb); + BUG_ON(set->fb && set->num_connectors == 0); + + crtc_funcs = set->crtc->helper_private; + + dev = set->crtc->dev; + WARN_ON(drm_drv_uses_atomic_modeset(dev)); + + if (!set->mode) + set->fb = NULL; + + if (set->fb) { + DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n", + set->crtc->base.id, set->crtc->name, + set->fb->base.id, + (int)set->num_connectors, set->x, set->y); + } else { + DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n", + set->crtc->base.id, set->crtc->name); + drm_crtc_helper_disable(set->crtc); + return 0; + } + + drm_warn_on_modeset_not_all_locked(dev); + + /* + * Allocate space for the backup of all (non-pointer) encoder and + * connector data. + */ + save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder, + sizeof(struct drm_crtc *), GFP_KERNEL); + if (!save_encoder_crtcs) + return -ENOMEM; + + save_connector_encoders = kcalloc(dev->mode_config.num_connector, + sizeof(struct drm_encoder *), GFP_KERNEL); + if (!save_connector_encoders) { + kfree(save_encoder_crtcs); + return -ENOMEM; + } + + /* + * Copy data. Note that driver private data is not affected. + * Should anything bad happen only the expected state is + * restored, not the drivers personal bookkeeping. 
+ */ + count = 0; + drm_for_each_encoder(encoder, dev) { + save_encoder_crtcs[count++] = encoder->crtc; + } + + count = 0; + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) + save_connector_encoders[count++] = connector->encoder; + drm_connector_list_iter_end(&conn_iter); + + save_set.crtc = set->crtc; + save_set.mode = &set->crtc->mode; + save_set.x = set->crtc->x; + save_set.y = set->crtc->y; + save_set.fb = set->crtc->primary->fb; + + /* We should be able to check here if the fb has the same properties + * and then just flip_or_move it */ + if (set->crtc->primary->fb != set->fb) { + /* If we have no fb then treat it as a full mode set */ + if (set->crtc->primary->fb == NULL) { + DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); + mode_changed = true; + } else if (set->fb->format != set->crtc->primary->fb->format) { + mode_changed = true; + } else + fb_changed = true; + } + + if (set->x != set->crtc->x || set->y != set->crtc->y) + fb_changed = true; + + if (!drm_mode_equal(set->mode, &set->crtc->mode)) { + DRM_DEBUG_KMS("modes are different, full mode set\n"); + drm_mode_debug_printmodeline(&set->crtc->mode); + drm_mode_debug_printmodeline(set->mode); + mode_changed = true; + } + + /* take a reference on all unbound connectors in set, reuse the + * already taken reference for bound connectors + */ + for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro]->encoder) + continue; + drm_connector_get(set->connectors[ro]); + } + + /* a) traverse passed in connector list and get encoders for them */ + count = 0; + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + const struct drm_connector_helper_funcs *connector_funcs = + connector->helper_private; + new_encoder = connector->encoder; + for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro] == connector) { + new_encoder = connector_funcs->best_encoder(connector); + /* if we can't get an encoder for a connector + we are setting now - then fail */ + if (new_encoder == NULL) + /* don't break so fail path works correct */ + fail = 1; + + if (connector->dpms != DRM_MODE_DPMS_ON) { + DRM_DEBUG_KMS("connector dpms not on, full mode switch\n"); + mode_changed = true; + } + + break; + } + } + + if (new_encoder != connector->encoder) { + DRM_DEBUG_KMS("encoder changed, full mode switch\n"); + mode_changed = true; + /* If the encoder is reused for another connector, then + * the appropriate crtc will be set later. 
+ */ + if (connector->encoder) + connector->encoder->crtc = NULL; + connector->encoder = new_encoder; + } + } + drm_connector_list_iter_end(&conn_iter); + + if (fail) { + ret = -EINVAL; + goto fail; + } + + count = 0; + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (!connector->encoder) + continue; + + if (connector->encoder->crtc == set->crtc) + new_crtc = NULL; + else + new_crtc = connector->encoder->crtc; + + for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro] == connector) + new_crtc = set->crtc; + } + + /* Make sure the new CRTC will work with the encoder */ + if (new_crtc && + !drm_encoder_crtc_ok(connector->encoder, new_crtc)) { + ret = -EINVAL; + drm_connector_list_iter_end(&conn_iter); + goto fail; + } + if (new_crtc != connector->encoder->crtc) { + DRM_DEBUG_KMS("crtc changed, full mode switch\n"); + mode_changed = true; + connector->encoder->crtc = new_crtc; + } + if (new_crtc) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n", + connector->base.id, connector->name, + new_crtc->base.id, new_crtc->name); + } else { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", + connector->base.id, connector->name); + } + } + drm_connector_list_iter_end(&conn_iter); + + /* mode_set_base is not a required function */ + if (fb_changed && !crtc_funcs->mode_set_base) + mode_changed = true; + + if (mode_changed) { + if (drm_helper_crtc_in_use(set->crtc)) { + DRM_DEBUG_KMS("attempting to set mode from" + " userspace\n"); + drm_mode_debug_printmodeline(set->mode); + set->crtc->primary->fb = set->fb; + if (!drm_crtc_helper_set_mode(set->crtc, set->mode, + set->x, set->y, + save_set.fb)) { + DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n", + set->crtc->base.id, set->crtc->name); + set->crtc->primary->fb = save_set.fb; + ret = -EINVAL; + goto fail; + } + DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); + for (i = 0; i < set->num_connectors; i++) { + DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, + set->connectors[i]->name); + set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); + } + } + __drm_helper_disable_unused_functions(dev); + } else if (fb_changed) { + set->crtc->x = set->x; + set->crtc->y = set->y; + set->crtc->primary->fb = set->fb; + ret = crtc_funcs->mode_set_base(set->crtc, + set->x, set->y, save_set.fb); + if (ret != 0) { + set->crtc->x = save_set.x; + set->crtc->y = save_set.y; + set->crtc->primary->fb = save_set.fb; + goto fail; + } + } + + kfree(save_connector_encoders); + kfree(save_encoder_crtcs); + return 0; + +fail: + /* Restore all previous data. 
*/ + count = 0; + drm_for_each_encoder(encoder, dev) { + encoder->crtc = save_encoder_crtcs[count++]; + } + + count = 0; + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) + connector->encoder = save_connector_encoders[count++]; + drm_connector_list_iter_end(&conn_iter); + + /* after fail drop reference on all unbound connectors in set, let + * bound connectors keep their reference + */ + for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro]->encoder) + continue; + drm_connector_put(set->connectors[ro]); + } + + /* Try to restore the config */ + if (mode_changed && + !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, + save_set.y, save_set.fb)) + DRM_ERROR("failed to restore config after modeset failure\n"); + + kfree(save_connector_encoders); + kfree(save_encoder_crtcs); + return ret; +} +EXPORT_SYMBOL(drm_crtc_helper_set_config); + +static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) +{ + int dpms = DRM_MODE_DPMS_OFF; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct drm_device *dev = encoder->dev; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) + if (connector->encoder == encoder) + if (connector->dpms < dpms) + dpms = connector->dpms; + drm_connector_list_iter_end(&conn_iter); + + return dpms; +} + +/* Helper which handles bridge ordering around encoder dpms */ +static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + const struct drm_encoder_helper_funcs *encoder_funcs; + + encoder_funcs = encoder->helper_private; + if (!encoder_funcs) + return; + + if (encoder_funcs->dpms) + encoder_funcs->dpms(encoder, mode); +} + +static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) +{ + int dpms = DRM_MODE_DPMS_OFF; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct drm_device *dev = crtc->dev; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) + if (connector->encoder && connector->encoder->crtc == crtc) + if (connector->dpms < dpms) + dpms = connector->dpms; + drm_connector_list_iter_end(&conn_iter); + + return dpms; +} + +/** + * drm_helper_connector_dpms() - connector dpms helper implementation + * @connector: affected connector + * @mode: DPMS mode + * + * The drm_helper_connector_dpms() helper function implements the + * &drm_connector_funcs.dpms callback for drivers using the legacy CRTC + * helpers. + * + * This is the main helper function provided by the CRTC helper framework for + * implementing the DPMS connector attribute. It computes the new desired DPMS + * state for all encoders and CRTCs in the output mesh and calls the + * &drm_crtc_helper_funcs.dpms and &drm_encoder_helper_funcs.dpms callbacks + * provided by the driver. + * + * This function is deprecated. New drivers must implement atomic modeset + * support, where DPMS is handled in the DRM core. + * + * Returns: + * Always returns 0. + */ +int drm_helper_connector_dpms(struct drm_connector *connector, int mode) +{ + struct drm_encoder *encoder = connector->encoder; + struct drm_crtc *crtc = encoder ? 
encoder->crtc : NULL; + int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF; + + WARN_ON(drm_drv_uses_atomic_modeset(connector->dev)); + + if (mode == connector->dpms) + return 0; + + old_dpms = connector->dpms; + connector->dpms = mode; + + if (encoder) + encoder_dpms = drm_helper_choose_encoder_dpms(encoder); + + /* from off to on, do crtc then encoder */ + if (mode < old_dpms) { + if (crtc) { + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + if (crtc_funcs->dpms) + (*crtc_funcs->dpms) (crtc, + drm_helper_choose_crtc_dpms(crtc)); + } + if (encoder) + drm_helper_encoder_dpms(encoder, encoder_dpms); + } + + /* from on to off, do encoder then crtc */ + if (mode > old_dpms) { + if (encoder) + drm_helper_encoder_dpms(encoder, encoder_dpms); + if (crtc) { + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + if (crtc_funcs->dpms) + (*crtc_funcs->dpms) (crtc, + drm_helper_choose_crtc_dpms(crtc)); + } + } + + return 0; +} +EXPORT_SYMBOL(drm_helper_connector_dpms); + +/** + * drm_helper_resume_force_mode - force-restore mode setting configuration + * @dev: drm_device which should be restored + * + * Drivers which use the mode setting helpers can use this function to + * force-restore the mode setting configuration e.g. on resume or when something + * else might have trampled over the hw state (like some overzealous old BIOSen + * tended to do). + * + * This helper doesn't provide a error return value since restoring the old + * config should never fail due to resource allocation issues since the driver + * has successfully set the restored configuration already. Hence this should + * boil down to the equivalent of a few dpms on calls, which also don't provide + * an error code. + * + * Drivers where simply restoring an old configuration again might fail (e.g. + * due to slight differences in allocating shared resources when the + * configuration is restored in a different order than when userspace set it up) + * need to use their own restore logic. + * + * This function is deprecated. New drivers should implement atomic mode- + * setting and use the atomic suspend/resume helpers. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() + */ +void drm_helper_resume_force_mode(struct drm_device *dev) +{ + struct drm_crtc *crtc; + struct drm_encoder *encoder; + const struct drm_crtc_helper_funcs *crtc_funcs; + int encoder_dpms; + bool ret; + + WARN_ON(drm_drv_uses_atomic_modeset(dev)); + + drm_modeset_lock_all(dev); + drm_for_each_crtc(crtc, dev) { + + if (!crtc->enabled) + continue; + + ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->primary->fb); + + /* Restoring the old config should never fail! 
*/ + if (ret == false) + DRM_ERROR("failed to set mode on crtc %p\n", crtc); + + /* Turn off outputs that were already powered off */ + if (drm_helper_choose_crtc_dpms(crtc)) { + drm_for_each_encoder(encoder, dev) { + + if(encoder->crtc != crtc) + continue; + + encoder_dpms = drm_helper_choose_encoder_dpms( + encoder); + + drm_helper_encoder_dpms(encoder, encoder_dpms); + } + + crtc_funcs = crtc->helper_private; + if (crtc_funcs->dpms) + (*crtc_funcs->dpms) (crtc, + drm_helper_choose_crtc_dpms(crtc)); + } + } + + /* disable the unused connectors while restoring the modesetting */ + __drm_helper_disable_unused_functions(dev); + drm_modeset_unlock_all(dev); +} +EXPORT_SYMBOL(drm_helper_resume_force_mode); + +/** + * drm_helper_force_disable_all - Forcibly turn off all enabled CRTCs + * @dev: DRM device whose CRTCs to turn off + * + * Drivers may want to call this on unload to ensure that all displays are + * unlit and the GPU is in a consistent, low power state. Takes modeset locks. + * + * Note: This should only be used by non-atomic legacy drivers. For an atomic + * version look at drm_atomic_helper_shutdown(). + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_helper_force_disable_all(struct drm_device *dev) +{ + struct drm_crtc *crtc; + int ret = 0; + + drm_modeset_lock_all(dev); + drm_for_each_crtc(crtc, dev) + if (crtc->enabled) { + struct drm_mode_set set = { + .crtc = crtc, + }; + + ret = drm_mode_set_config_internal(&set); + if (ret) + goto out; + } +out: + drm_modeset_unlock_all(dev); + return ret; +} +EXPORT_SYMBOL(drm_helper_force_disable_all); Index: sys/dev/drm/core/drm_crtc_helper_internal.h =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_crtc_helper_internal.h @@ -0,0 +1,77 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * This header file contains mode setting related functions and definitions + * which are only used within the drm kms helper module as internal + * implementation details and are not exported to drivers. 
+ */ + +#include +#include +#include +#include +#include + +/* drm_fb_helper.c */ +#ifdef CONFIG_DRM_FBDEV_EMULATION +int drm_fb_helper_modinit(void); +#else +static inline int drm_fb_helper_modinit(void) +{ + return 0; +} +#endif + +/* drm_dp_aux_dev.c */ +#ifdef CONFIG_DRM_DP_AUX_CHARDEV +int drm_dp_aux_dev_init(void); +void drm_dp_aux_dev_exit(void); +int drm_dp_aux_register_devnode(struct drm_dp_aux *aux); +void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux); +#else +static inline int drm_dp_aux_dev_init(void) +{ + return 0; +} + +static inline void drm_dp_aux_dev_exit(void) +{ +} + +static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux) +{ + return 0; +} + +static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) +{ +} +#endif + +/* drm_probe_helper.c */ +enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode); +enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder, + const struct drm_display_mode *mode); +enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode); Index: sys/dev/drm/core/drm_crtc_internal.h =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_crtc_internal.h @@ -0,0 +1,278 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * Copyright © 2014 Intel Corporation + * Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * This header file contains mode setting related functions and definitions + * which are only used within the drm module as internal implementation details + * and are not exported to drivers. 
+ */ + +#include + +enum drm_color_encoding; +enum drm_color_range; +enum drm_connector_force; +enum drm_mode_status; + +struct drm_atomic_state; +struct drm_bridge; +struct drm_connector; +struct drm_crtc; +struct drm_device; +struct drm_display_mode; +struct drm_file; +struct drm_framebuffer; +struct drm_mode_create_dumb; +struct drm_mode_fb_cmd2; +struct drm_mode_fb_cmd; +struct drm_mode_object; +struct drm_mode_set; +struct drm_plane; +struct drm_plane_state; +struct drm_property; +struct edid; +struct kref; +struct work_struct; + +/* drm_crtc.c */ +int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value); +int drm_crtc_check_viewport(const struct drm_crtc *crtc, + int x, int y, + const struct drm_display_mode *mode, + const struct drm_framebuffer *fb); +int drm_crtc_register_all(struct drm_device *dev); +void drm_crtc_unregister_all(struct drm_device *dev); +int drm_crtc_force_disable(struct drm_crtc *crtc); + +struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc); + +/* IOCTLs */ +int drm_mode_getcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_setcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv); + + +/* drm_mode_config.c */ +int drm_modeset_register_all(struct drm_device *dev); +void drm_modeset_unregister_all(struct drm_device *dev); + +/* drm_modes.c */ +const char *drm_get_mode_status_name(enum drm_mode_status status); + +/* IOCTLs */ +int drm_mode_getresources(struct drm_device *dev, + void *data, struct drm_file *file_priv); + + +/* drm_dumb_buffers.c */ +int drm_mode_create_dumb(struct drm_device *dev, + struct drm_mode_create_dumb *args, + struct drm_file *file_priv); +int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle, + struct drm_file *file_priv); + +/* IOCTLs */ +int drm_mode_create_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_color_mgmt.c */ +const char *drm_get_color_encoding_name(enum drm_color_encoding encoding); +const char *drm_get_color_range_name(enum drm_color_range range); + +/* IOCTLs */ +int drm_mode_gamma_get_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_gamma_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_property.c */ +void drm_property_destroy_user_blobs(struct drm_device *dev, + struct drm_file *file_priv); +bool drm_property_change_valid_get(struct drm_property *property, + uint64_t value, + struct drm_mode_object **ref); +void drm_property_change_valid_put(struct drm_property *property, + struct drm_mode_object *ref); + +/* IOCTL */ +int drm_mode_getproperty_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_createblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_destroyblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_mode_object.c */ +int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj, + uint32_t obj_type, bool register_obj, + void (*obj_free_cb)(struct kref *kref)); +int drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj, + uint32_t obj_type); +void 
drm_mode_object_register(struct drm_device *dev, + struct drm_mode_object *obj); +struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id, uint32_t type); +void drm_mode_object_unregister(struct drm_device *dev, + struct drm_mode_object *object); +int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, + uint32_t __user *prop_ptr, + uint64_t __user *prop_values, + uint32_t *arg_count_props); +struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj, + uint32_t prop_id); + +/* IOCTL */ + +int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/* drm_encoder.c */ +int drm_encoder_register_all(struct drm_device *dev); +void drm_encoder_unregister_all(struct drm_device *dev); + +/* IOCTL */ +int drm_mode_getencoder(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_connector.c */ +void drm_connector_ida_init(void); +void drm_connector_ida_destroy(void); +void drm_connector_unregister_all(struct drm_device *dev); +int drm_connector_register_all(struct drm_device *dev); +int drm_connector_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value); +int drm_connector_create_standard_properties(struct drm_device *dev); +const char *drm_get_connector_force_name(enum drm_connector_force force); +void drm_connector_free_work_fn(struct work_struct *work); + +/* IOCTL */ +int drm_connector_property_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getconnector(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_framebuffer.c */ +struct drm_framebuffer * +drm_internal_framebuffer_create(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv); +void drm_framebuffer_free(struct kref *kref); +int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + const struct drm_framebuffer *fb); +void drm_fb_release(struct drm_file *file_priv); + +int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or, + struct drm_file *file_priv); +int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, + struct drm_file *file_priv); + + +/* IOCTL */ +int drm_mode_addfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_addfb2_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_rmfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_atomic.c */ +#ifdef CONFIG_DEBUG_FS +struct drm_minor; +int drm_atomic_debugfs_init(struct drm_minor *minor); +#endif + +int __drm_atomic_helper_disable_plane(struct drm_plane *plane, + struct drm_plane_state *plane_state); +int __drm_atomic_helper_set_config(struct drm_mode_set *set, + struct drm_atomic_state *state); + +void drm_atomic_print_state(const struct drm_atomic_state *state); + +/* drm_atomic_uapi.c */ +int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, + struct drm_connector *connector, + int mode); +int drm_atomic_set_property(struct 
drm_atomic_state *state, + struct drm_file *file_priv, + struct drm_mode_object *obj, + struct drm_property *prop, + uint64_t prop_value); +int drm_atomic_get_property(struct drm_mode_object *obj, + struct drm_property *property, uint64_t *val); + +/* IOCTL */ +int drm_mode_atomic_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + + +/* drm_plane.c */ +int drm_plane_register_all(struct drm_device *dev); +void drm_plane_unregister_all(struct drm_device *dev); +int drm_plane_check_pixel_format(struct drm_plane *plane, + u32 format, u64 modifier); + +/* drm_bridge.c */ +void drm_bridge_detach(struct drm_bridge *bridge); + +/* IOCTL */ +int drm_mode_getplane_res(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_mode_getplane(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_setplane(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_cursor_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_cursor2_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_page_flip_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_edid.c */ +void drm_mode_fixup_1366x768(struct drm_display_mode *mode); +void drm_reset_display_info(struct drm_connector *connector); +u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid); Index: sys/dev/drm/core/drm_damage_helper.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_damage_helper.c @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/************************************************************************** + * + * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Deepak Rawat + * Rob Clark + * + **************************************************************************/ + +#include +#include +#include + +/** + * DOC: overview + * + * FB_DAMAGE_CLIPS is an optional plane property which provides a means to + * specify a list of damage rectangles on a plane in framebuffer coordinates of + * the framebuffer attached to the plane. 
In this context, damage is the area
+ * of the plane framebuffer that has changed since the last plane update (also
+ * called page-flip), irrespective of whether the currently attached
+ * framebuffer is the same as the framebuffer attached during the last plane
+ * update.
+ *
+ * FB_DAMAGE_CLIPS is a hint to the kernel that can help some drivers optimize
+ * internally, especially virtual devices where each framebuffer change needs
+ * to be transmitted over network, USB, etc.
+ *
+ * Since FB_DAMAGE_CLIPS is only a hint, it is an optional property. User-space
+ * can ignore the damage clips property, in which case the driver will do a
+ * full plane update. When damage clips are provided, it is guaranteed that the
+ * area inside the damage clips will be updated on the plane. For efficiency
+ * the driver can do a full update or update more than what is specified in
+ * the damage clips. Since the driver is free to read more, user-space must
+ * always render the entire visible framebuffer; otherwise there can be
+ * corruption. Also, damage clips which don't encompass the actual damage to
+ * the framebuffer (since the last plane update) can result in incorrect
+ * rendering.
+ *
+ * FB_DAMAGE_CLIPS is a blob property whose blob data is simply an array of
+ * &drm_mode_rect. Unlike the plane &drm_plane_state.src coordinates, damage
+ * clips are not in 16.16 fixed point. As with the plane src in the
+ * framebuffer, damage clips cannot be negative. In a damage clip, x1/y1 are
+ * inclusive and x2/y2 are exclusive. While the kernel does not reject
+ * overlapping damage clips, they are strongly discouraged.
+ *
+ * Drivers that are interested in the damage interface for a plane should
+ * enable the FB_DAMAGE_CLIPS property by calling
+ * drm_plane_enable_fb_damage_clips(). Drivers implementing damage can use the
+ * drm_atomic_helper_damage_iter_init() and
+ * drm_atomic_helper_damage_iter_next() helper iterator functions to get
+ * damage rectangles clipped to &drm_plane_state.src.
+ */
+
+static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
+				      struct drm_mode_rect *dest,
+				      uint32_t num_clips, uint32_t src_inc)
+{
+	while (num_clips > 0) {
+		dest->x1 = src->x1;
+		dest->y1 = src->y1;
+		dest->x2 = src->x2;
+		dest->y2 = src->y2;
+		src += src_inc;
+		dest++;
+		num_clips--;
+	}
+}
+
+/**
+ * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property.
+ * @plane: Plane on which to enable damage clips property.
+ *
+ * This function lets a driver enable the damage clips property on a plane.
+ */
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_mode_config *config = &dev->mode_config;
+
+	drm_object_attach_property(&plane->base, config->prop_fb_damage_clips,
+				   0);
+}
+EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips);
+
+/**
+ * drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check.
+ * @state: The driver state object.
+ * @plane_state: Plane state for which to verify damage.
+ *
+ * This helper function makes sure that damage from the plane state is
+ * discarded on a full modeset. If there are more reasons a driver would want
+ * to do a full plane update rather than processing individual damage regions,
+ * then those cases should be taken care of here.
+ *
+ * Note that &drm_plane_state.fb_damage_clips == NULL in the plane state means
+ * that a full plane update should happen. It also ensures that the helper
+ * iterator will return &drm_plane_state.src as the damage.
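+ *
+ * A minimal sketch of calling this helper from a plane's
+ * &drm_plane_helper_funcs.atomic_check hook (the surrounding driver code is
+ * hypothetical):
+ *
+ *     static int my_plane_atomic_check(struct drm_plane *plane,
+ *                                      struct drm_plane_state *new_state)
+ *     {
+ *             struct drm_atomic_state *state = new_state->state;
+ *
+ *             drm_atomic_helper_check_plane_damage(state, new_state);
+ *             return 0;
+ *     }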
+ */ +void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state, + struct drm_plane_state *plane_state) +{ + struct drm_crtc_state *crtc_state; + + if (plane_state->crtc) { + crtc_state = drm_atomic_get_new_crtc_state(state, + plane_state->crtc); + + if (WARN_ON(!crtc_state)) + return; + + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + drm_property_blob_put(plane_state->fb_damage_clips); + plane_state->fb_damage_clips = NULL; + } + } +} +EXPORT_SYMBOL(drm_atomic_helper_check_plane_damage); + +/** + * drm_atomic_helper_dirtyfb - Helper for dirtyfb. + * @fb: DRM framebuffer. + * @file_priv: Drm file for the ioctl call. + * @flags: Dirty fb annotate flags. + * @color: Color for annotate fill. + * @clips: Dirty region. + * @num_clips: Count of clip in clips. + * + * A helper to implement &drm_framebuffer_funcs.dirty using damage interface + * during plane update. If num_clips is 0 then this helper will do a full plane + * update. This is the same behaviour expected by DIRTFB IOCTL. + * + * Note that this helper is blocking implementation. This is what current + * drivers and userspace expect in their DIRTYFB IOCTL implementation, as a way + * to rate-limit userspace and make sure its rendering doesn't get ahead of + * uploading new data too much. + * + * Return: Zero on success, negative errno on failure. + */ +int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int flags, + unsigned int color, struct drm_clip_rect *clips, + unsigned int num_clips) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_property_blob *damage = NULL; + struct drm_mode_rect *rects = NULL; + struct drm_atomic_state *state; + struct drm_plane *plane; + int ret = 0; + + /* + * When called from ioctl, we are interruptable, but not when called + * internally (ie. defio worker) + */ + drm_modeset_acquire_init(&ctx, + file_priv ? DRM_MODESET_ACQUIRE_INTERRUPTIBLE : 0); + + state = drm_atomic_state_alloc(fb->dev); + if (!state) { + ret = -ENOMEM; + goto out_drop_locks; + } + state->acquire_ctx = &ctx; + + if (clips) { + uint32_t inc = 1; + + if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { + inc = 2; + num_clips /= 2; + } + + rects = kcalloc(num_clips, sizeof(*rects), GFP_KERNEL); + if (!rects) { + ret = -ENOMEM; + goto out; + } + + convert_clip_rect_to_rect(clips, rects, num_clips, inc); + damage = drm_property_create_blob(fb->dev, + num_clips * sizeof(*rects), + rects); + if (IS_ERR(damage)) { + ret = PTR_ERR(damage); + damage = NULL; + goto out; + } + } + +retry: + drm_for_each_plane(plane, fb->dev) { + struct drm_plane_state *plane_state; + + if (plane->state->fb != fb) + continue; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto out; + } + + drm_property_replace_blob(&plane_state->fb_damage_clips, + damage); + } + + ret = drm_atomic_commit(state); + +out: + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry; + } + + drm_property_blob_put(damage); + kfree(rects); + drm_atomic_state_put(state); + +out_drop_locks: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; + +} +EXPORT_SYMBOL(drm_atomic_helper_dirtyfb); + +/** + * drm_atomic_helper_damage_iter_init - Initialize the damage iterator. + * @iter: The iterator to initialize. + * @old_state: Old plane state for validation. + * @state: Plane state from which to iterate the damage clips. 
+ *
+ * Initialize an iterator, which clips the plane damage
+ * &drm_plane_state.fb_damage_clips to the plane &drm_plane_state.src. The
+ * iterator returns the full plane src when damage is not present, either
+ * because user-space didn't send it or because the driver discarded it (i.e.
+ * it wants to do a full plane update). Currently the iterator also returns
+ * the full plane src when the plane src changed, but that may be changed in
+ * the future to return only the damage.
+ *
+ * When the plane is not visible, or the plane update should not happen, the
+ * first call to iter_next will return false. Note that this helper uses the
+ * clipped &drm_plane_state.src, so a driver calling this helper should have
+ * called drm_atomic_helper_check_plane_state() earlier.
+ */
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+				   const struct drm_plane_state *old_state,
+				   const struct drm_plane_state *state)
+{
+	memset(iter, 0, sizeof(*iter));
+
+	if (!state || !state->crtc || !state->fb || !state->visible)
+		return;
+
+	iter->clips = drm_helper_get_plane_damage_clips(state);
+	iter->num_clips = drm_plane_get_damage_clips_count(state);
+
+	/* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
+	iter->plane_src.x1 = state->src.x1 >> 16;
+	iter->plane_src.y1 = state->src.y1 >> 16;
+	iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+	iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+	if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+		iter->clips = NULL;
+		iter->num_clips = 0;
+		iter->full_update = true;
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_init);
+
+/**
+ * drm_atomic_helper_damage_iter_next - Advance the damage iterator.
+ * @iter: The iterator to advance.
+ * @rect: Return a rectangle in fb coordinates clipped to the plane src.
+ *
+ * Since the plane src is in 16.16 fixed point and damage clips are in whole
+ * numbers, this iterator rounds off clips that intersect with the plane src:
+ * round down for x1/y1 and round up for x2/y2 for the intersected
+ * coordinates. The full plane src is rounded off in the same way, in case
+ * it's returned as damage. This iterator will skip damage clips outside of
+ * the plane src.
+ *
+ * Return: True if the output is valid, false if the end has been reached.
+ *
+ * If the first call to the iterator returns false, there is no need to update
+ * the plane.
+ */
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+				   struct drm_rect *rect)
+{
+	bool ret = false;
+
+	if (iter->full_update) {
+		*rect = iter->plane_src;
+		iter->full_update = false;
+		return true;
+	}
+
+	while (iter->curr_clip < iter->num_clips) {
+		*rect = iter->clips[iter->curr_clip];
+		iter->curr_clip++;
+
+		if (drm_rect_intersect(rect, &iter->plane_src)) {
+			ret = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
+
+/**
+ * drm_atomic_helper_damage_merged - Merged plane damage
+ * @old_state: Old plane state for validation.
+ * @state: Plane state from which to iterate the damage clips.
+ * @rect: Returns the merged damage rectangle
+ *
+ * This function merges any valid plane damage clips into one rectangle and
+ * returns it in @rect.
+ *
+ * For details see: drm_atomic_helper_damage_iter_init() and
+ * drm_atomic_helper_damage_iter_next().
+ *
+ * Returns:
+ * True if there is valid plane damage, false otherwise.
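+ *
+ * A minimal sketch of using the merged damage rectangle in a plane's
+ * atomic_update hook to limit the upload area (my_hw_upload_rect() is a
+ * hypothetical driver function):
+ *
+ *     static void my_plane_atomic_update(struct drm_plane *plane,
+ *                                        struct drm_plane_state *old_state)
+ *     {
+ *             struct drm_rect damage;
+ *
+ *             if (drm_atomic_helper_damage_merged(old_state, plane->state,
+ *                                                 &damage))
+ *                     my_hw_upload_rect(plane, &damage);
+ *     }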
+ */ +bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state, + struct drm_plane_state *state, + struct drm_rect *rect) +{ + struct drm_atomic_helper_damage_iter iter; + struct drm_rect clip; + bool valid = false; + + rect->x1 = INT_MAX; + rect->y1 = INT_MAX; + rect->x2 = 0; + rect->y2 = 0; + + drm_atomic_helper_damage_iter_init(&iter, old_state, state); + drm_atomic_for_each_plane_damage(&iter, &clip) { + rect->x1 = min(rect->x1, clip.x1); + rect->y1 = min(rect->y1, clip.y1); + rect->x2 = max(rect->x2, clip.x2); + rect->y2 = max(rect->y2, clip.y2); + valid = true; + } + + return valid; +} +EXPORT_SYMBOL(drm_atomic_helper_damage_merged); Index: sys/dev/drm/core/drm_dp_aux_dev.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_dp_aux_dev.c @@ -0,0 +1,356 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ *
+ * Authors:
+ *    Rafael Antognolli
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "drm_crtc_helper_internal.h"
+
+struct drm_dp_aux_dev {
+	unsigned index;
+	struct drm_dp_aux *aux;
+	struct device *dev;
+	struct kref refcount;
+	atomic_t usecount;
+};
+
+#define DRM_AUX_MINORS 256
+#define AUX_MAX_OFFSET (1 << 20)
+static DEFINE_IDR(aux_idr);
+static DEFINE_MUTEX(aux_idr_mutex);
+static struct class *drm_dp_aux_dev_class;
+static int drm_dev_major = -1;
+
+static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
+{
+	struct drm_dp_aux_dev *aux_dev = NULL;
+
+	mutex_lock(&aux_idr_mutex);
+	aux_dev = idr_find(&aux_idr, index);
+	/* Guard against minors that were never registered. */
+	if (!aux_dev || !kref_get_unless_zero(&aux_dev->refcount))
+		aux_dev = NULL;
+	mutex_unlock(&aux_idr_mutex);
+
+	return aux_dev;
+}
+
+static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux)
+{
+	struct drm_dp_aux_dev *aux_dev;
+	int index;
+
+	aux_dev = kzalloc(sizeof(*aux_dev), GFP_KERNEL);
+	if (!aux_dev)
+		return ERR_PTR(-ENOMEM);
+	aux_dev->aux = aux;
+	atomic_set(&aux_dev->usecount, 1);
+	kref_init(&aux_dev->refcount);
+
+	mutex_lock(&aux_idr_mutex);
+	index = idr_alloc(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, GFP_KERNEL);
+	mutex_unlock(&aux_idr_mutex);
+	if (index < 0) {
+		kfree(aux_dev);
+		return ERR_PTR(index);
+	}
+	aux_dev->index = index;
+
+	return aux_dev;
+}
+
+static void release_drm_dp_aux_dev(struct kref *ref)
+{
+	struct drm_dp_aux_dev *aux_dev =
+		container_of(ref, struct drm_dp_aux_dev, refcount);
+
+	kfree(aux_dev);
+}
+
+static ssize_t name_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	ssize_t res;
+	struct drm_dp_aux_dev *aux_dev =
+		drm_dp_aux_dev_get_by_minor(MINOR(dev->devt));
+
+	if (!aux_dev)
+		return -ENODEV;
+
+	res = sprintf(buf, "%s\n", aux_dev->aux->name);
+	kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
+
+	return res;
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *drm_dp_aux_attrs[] = {
+	&dev_attr_name.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(drm_dp_aux);
+
+static int auxdev_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor = iminor(inode);
+	struct drm_dp_aux_dev *aux_dev;
+
+	aux_dev = drm_dp_aux_dev_get_by_minor(minor);
+	if (!aux_dev)
+		return -ENODEV;
+
+	file->private_data = aux_dev;
+	return 0;
+}
+
+static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence)
+{
+	return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET);
+}
+
+static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
+	loff_t pos = iocb->ki_pos;
+	ssize_t res = 0;
+
+	if (!atomic_inc_not_zero(&aux_dev->usecount))
+		return -ENODEV;
+
+	iov_iter_truncate(to, AUX_MAX_OFFSET - pos);
+
+	while (iov_iter_count(to)) {
+		uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
+		ssize_t todo = min(iov_iter_count(to), sizeof(buf));
+
+		if (signal_pending(current)) {
+			res = -ERESTARTSYS;
+			break;
+		}
+
+		if (aux_dev->aux->is_remote)
+			res = drm_dp_mst_dpcd_read(aux_dev->aux, pos, buf,
+						   todo);
+		else
+			res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+
+		if (res <= 0)
+			break;
+
+		if (copy_to_iter(buf, res, to) != res) {
+			res = -EFAULT;
+			break;
+		}
+
+		pos += res;
+	}
+
+	if (pos != iocb->ki_pos)
+		res = pos - iocb->ki_pos;
+	iocb->ki_pos = pos;
+
+	if (atomic_dec_and_test(&aux_dev->usecount))
+		wake_up_var(&aux_dev->usecount);
+
+	return res;
+}
+
+static ssize_t
auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data; + loff_t pos = iocb->ki_pos; + ssize_t res = 0; + + if (!atomic_inc_not_zero(&aux_dev->usecount)) + return -ENODEV; + + iov_iter_truncate(from, AUX_MAX_OFFSET - pos); + + while (iov_iter_count(from)) { + uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES]; + ssize_t todo = min(iov_iter_count(from), sizeof(buf)); + + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + + if (!copy_from_iter_full(buf, todo, from)) { + res = -EFAULT; + break; + } + + if (aux_dev->aux->is_remote) + res = drm_dp_mst_dpcd_write(aux_dev->aux, pos, buf, + todo); + else + res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo); + + if (res <= 0) + break; + + pos += res; + } + + if (pos != iocb->ki_pos) + res = pos - iocb->ki_pos; + iocb->ki_pos = pos; + + if (atomic_dec_and_test(&aux_dev->usecount)) + wake_up_var(&aux_dev->usecount); + + return res; +} + +static int auxdev_release(struct inode *inode, struct file *file) +{ + struct drm_dp_aux_dev *aux_dev = file->private_data; + + kref_put(&aux_dev->refcount, release_drm_dp_aux_dev); + return 0; +} + +static const struct file_operations auxdev_fops = { + .owner = THIS_MODULE, + .llseek = auxdev_llseek, + .read_iter = auxdev_read_iter, + .write_iter = auxdev_write_iter, + .open = auxdev_open, + .release = auxdev_release, +}; + +#define to_auxdev(d) container_of(d, struct drm_dp_aux_dev, aux) + +static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux) +{ + struct drm_dp_aux_dev *iter, *aux_dev = NULL; + int id; + + /* don't increase kref count here because this function should only be + * used by drm_dp_aux_unregister_devnode. Thus, it will always have at + * least one reference - the one that drm_dp_aux_register_devnode + * created + */ + mutex_lock(&aux_idr_mutex); + idr_for_each_entry(&aux_idr, iter, id) { + if (iter->aux == aux) { + aux_dev = iter; + break; + } + } + mutex_unlock(&aux_idr_mutex); + return aux_dev; +} + +void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) +{ + struct drm_dp_aux_dev *aux_dev; + unsigned int minor; + + aux_dev = drm_dp_aux_dev_get_by_aux(aux); + if (!aux_dev) /* attach must have failed */ + return; + + mutex_lock(&aux_idr_mutex); + idr_remove(&aux_idr, aux_dev->index); + mutex_unlock(&aux_idr_mutex); + + atomic_dec(&aux_dev->usecount); + wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount)); + + minor = aux_dev->index; + if (aux_dev->dev) + device_destroy(drm_dp_aux_dev_class, + MKDEV(drm_dev_major, minor)); + + DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name); + kref_put(&aux_dev->refcount, release_drm_dp_aux_dev); +} + +int drm_dp_aux_register_devnode(struct drm_dp_aux *aux) +{ + struct drm_dp_aux_dev *aux_dev; + int res; + + aux_dev = alloc_drm_dp_aux_dev(aux); + if (IS_ERR(aux_dev)) + return PTR_ERR(aux_dev); + + aux_dev->dev = device_create(drm_dp_aux_dev_class, aux->dev, + MKDEV(drm_dev_major, aux_dev->index), NULL, + "drm_dp_aux%d", aux_dev->index); + if (IS_ERR(aux_dev->dev)) { + res = PTR_ERR(aux_dev->dev); + aux_dev->dev = NULL; + goto error; + } + + DRM_DEBUG("drm_dp_aux_dev: aux [%s] registered as minor %d\n", + aux->name, aux_dev->index); + return 0; +error: + drm_dp_aux_unregister_devnode(aux); + return res; +} + +int drm_dp_aux_dev_init(void) +{ + int res; + + drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev"); + if (IS_ERR(drm_dp_aux_dev_class)) { + return PTR_ERR(drm_dp_aux_dev_class); + } + 
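+	/*
+	 * Expose the "name" attribute on every aux device created in this
+	 * class, then register a dynamically allocated char-dev major (first
+	 * argument 0) for the aux nodes.
+	 */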
drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups; + + res = register_chrdev(0, "aux", &auxdev_fops); + if (res < 0) + goto out; + drm_dev_major = res; + + return 0; +out: + class_destroy(drm_dp_aux_dev_class); + return res; +} + +void drm_dp_aux_dev_exit(void) +{ + unregister_chrdev(drm_dev_major, "aux"); + class_destroy(drm_dp_aux_dev_class); +} Index: sys/dev/drm/core/drm_dp_dual_mode_helper.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_dp_dual_mode_helper.c @@ -0,0 +1,526 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/** + * DOC: dp dual mode helpers + * + * Helper functions to deal with DP dual mode (aka. DP++) adaptors. + * + * Type 1: + * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C. + * + * Type 2: + * Adaptor registers and sink DDC bus can be accessed either via I2C or + * I2C-over-AUX. Source devices may choose to implement either of these + * access methods. + */ + +#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40 + +/** + * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s) + * @adapter: I2C adapter for the DDC bus + * @offset: register offset + * @buffer: buffer for return data + * @size: sizo of the buffer + * + * Reads @size bytes from the DP dual mode adaptor registers + * starting at @offset. + * + * Returns: + * 0 on success, negative error code on failure + */ +ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, + u8 offset, void *buffer, size_t size) +{ + struct i2c_msg msgs[] = { + { + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, + .flags = 0, + .len = 1, + .buf = &offset, + }, + { + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, + .flags = I2C_M_RD, + .len = size, + .buf = buffer, + }, + }; + int ret; + + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + if (ret != ARRAY_SIZE(msgs)) + return -EPROTO; + + return 0; +} +EXPORT_SYMBOL(drm_dp_dual_mode_read); + +/** + * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s) + * @adapter: I2C adapter for the DDC bus + * @offset: register offset + * @buffer: buffer for write data + * @size: sizo of the buffer + * + * Writes @size bytes to the DP dual mode adaptor registers + * starting at @offset. 
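+ *
+ * A minimal sketch, assuming the DP_DUAL_MODE_TMDS_OEN register offset and
+ * the convention that writing 0 enables the TMDS output buffers on a type 2
+ * adaptor:
+ *
+ *     uint8_t tmds_oen = 0;
+ *     ssize_t ret;
+ *
+ *     ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+ *                                  &tmds_oen, sizeof(tmds_oen));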
+ * + * Returns: + * 0 on success, negative error code on failure + */ +ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, + u8 offset, const void *buffer, size_t size) +{ + struct i2c_msg msg = { + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, + .flags = 0, + .len = 1 + size, + .buf = NULL, + }; + void *data; + int ret; + + data = kmalloc(msg.len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + msg.buf = data; + + memcpy(data, &offset, 1); + memcpy(data + 1, buffer, size); + + ret = i2c_transfer(adapter, &msg, 1); + + kfree(data); + + if (ret < 0) + return ret; + if (ret != 1) + return -EPROTO; + + return 0; +} +EXPORT_SYMBOL(drm_dp_dual_mode_write); + +static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) +{ + static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = + "DP-HDMI ADAPTOR\x04"; + + return memcmp(hdmi_id, dp_dual_mode_hdmi_id, + sizeof(dp_dual_mode_hdmi_id)) == 0; +} + +static bool is_type1_adaptor(uint8_t adaptor_id) +{ + return adaptor_id == 0 || adaptor_id == 0xff; +} + +static bool is_type2_adaptor(uint8_t adaptor_id) +{ + return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | + DP_DUAL_MODE_REV_TYPE2); +} + +static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN], + const uint8_t adaptor_id) +{ + return is_hdmi_adaptor(hdmi_id) && + (adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | + DP_DUAL_MODE_TYPE_HAS_DPCD)); +} + +/** + * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor + * @adapter: I2C adapter for the DDC bus + * + * Attempt to identify the type of the DP dual mode adaptor used. + * + * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not + * certain whether we're dealing with a native HDMI port or + * a type 1 DVI dual mode adaptor. The driver will have to use + * some other hardware/driver specific mechanism to make that + * distinction. + * + * Returns: + * The type of the DP dual mode adaptor used + */ +enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) +{ + char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {}; + uint8_t adaptor_id = 0x00; + ssize_t ret; + + /* + * Let's see if the adaptor is there by reading the + * HDMI ID registers. + * + * Note that type 1 DVI adaptors are not required to implement + * any registers, and that presents a problem for detection. + * If the i2c transfer is nacked, we may or may not be dealing + * with a type 1 DVI adaptor. Some other mechanism of detecting + * the presence of the adaptor is required. One way would be + * to check the state of the CONFIG1 pin. Another method would + * simply require the driver to know whether the port is a DP++ + * port or a native HDMI port. Both of these methods are entirely + * hardware/driver specific so we can't deal with them here. + */ + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, + hdmi_id, sizeof(hdmi_id)); + DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n", + ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret); + if (ret) + return DRM_DP_DUAL_MODE_UNKNOWN; + + /* + * Sigh. Some (maybe all?) type 1 adaptors are broken and ack + * the offset but ignore it, and instead they just always return + * data from the start of the HDMI ID buffer. So for a broken + * type 1 HDMI adaptor a single byte read will always give us + * 0x44, and for a type 1 DVI adaptor it should give 0x00 + * (assuming it implements any registers).
Fortunately neither + * of those values will match the type 2 signature of the + * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with + * the type 2 adaptor detection safely even in the presence + * of broken type 1 adaptors. + */ + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, + &adaptor_id, sizeof(adaptor_id)); + DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n", + adaptor_id, ret); + if (ret == 0) { + if (is_lspcon_adaptor(hdmi_id, adaptor_id)) + return DRM_DP_DUAL_MODE_LSPCON; + if (is_type2_adaptor(adaptor_id)) { + if (is_hdmi_adaptor(hdmi_id)) + return DRM_DP_DUAL_MODE_TYPE2_HDMI; + else + return DRM_DP_DUAL_MODE_TYPE2_DVI; + } + /* + * If this is neither a proper type 1 ID nor a broken type 1 + * adaptor as described above, assume type 1, but let the user + * know that we may have misdetected the type. + */ + if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0]) + DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n", + adaptor_id); + + } + + if (is_hdmi_adaptor(hdmi_id)) + return DRM_DP_DUAL_MODE_TYPE1_HDMI; + else + return DRM_DP_DUAL_MODE_TYPE1_DVI; +} +EXPORT_SYMBOL(drm_dp_dual_mode_detect); + +/** + * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor + * @type: DP dual mode adaptor type + * @adapter: I2C adapter for the DDC bus + * + * Determine the max TMDS clock the adaptor supports based on the + * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK + * register (on type2 adaptors). As some type 1 adaptors have + * problems with registers (see comments in drm_dp_dual_mode_detect()) + * we don't read the register on those, instead we simply assume + * a 165 MHz limit based on the specification. + * + * Returns: + * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz. + */ +int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter) +{ + uint8_t max_tmds_clock; + ssize_t ret; + + /* native HDMI so no limit */ + if (type == DRM_DP_DUAL_MODE_NONE) + return 0; + + /* + * Type 1 adaptors are limited to 165 MHz + * Type 2 adaptors can tell us their limit + */ + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) + return 165000; + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK, + &max_tmds_clock, sizeof(max_tmds_clock)); + if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) { + DRM_DEBUG_KMS("Failed to query max TMDS clock\n"); + return 165000; + } + + return max_tmds_clock * 5000 / 2; +} +EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock); + +/** + * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor + * @type: DP dual mode adaptor type + * @adapter: I2C adapter for the DDC bus + * @enabled: current state of the TMDS output buffers + * + * Get the state of the TMDS output buffers in the adaptor. For + * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN + * register. As some type 1 adaptors have problems with registers + * (see comments in drm_dp_dual_mode_detect()) we don't read the + * register on those, instead we simply assume that the buffers + * are always enabled.
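+ *
+ * A typical use together with drm_dp_dual_mode_set_tmds_output()
+ * below is (illustrative):
+ *
+ *	bool enabled;
+ *
+ *	if (drm_dp_dual_mode_get_tmds_output(type, adapter, &enabled) == 0 &&
+ *	    !enabled)
+ *		drm_dp_dual_mode_set_tmds_output(type, adapter, true);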
+ * + * Returns: + * 0 on success, negative error code on failure + */ +int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, + bool *enabled) +{ + uint8_t tmds_oen; + ssize_t ret; + + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) { + *enabled = true; + return 0; + } + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n"); + return ret; + } + + *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE); + + return 0; +} +EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); + +/** + * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor + * @type: DP dual mode adaptor type + * @adapter: I2C adapter for the DDC bus + * @enable: enable (as opposed to disable) the TMDS output buffers + * + * Set the state of the TMDS output buffers in the adaptor. For + * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As + * some type 1 adaptors have problems with registers (see comments + * in drm_dp_dual_mode_detect()) we avoid touching the register, + * making this function a no-op on type 1 adaptors. + * + * Returns: + * 0 on success, negative error code on failure + */ +int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool enable) +{ + uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; + ssize_t ret; + int retry; + + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) + return 0; + + /* + * LSPCON adapters in low-power state may ignore the first write, so + * read back and verify the written value a few times. + */ + for (retry = 0; retry < 3; retry++) { + uint8_t tmp; + + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", + enable ? "enable" : "disable", + retry + 1); + return ret; + } + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmp, sizeof(tmp)); + if (ret) { + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", + enable ? "enabling" : "disabling", + retry + 1); + return ret; + } + + if (tmp == tmds_oen) + return 0; + } + + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", + enable ? 
"enabling" : "disabling"); + + return -EIO; +} +EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); + +/** + * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string + * @type: DP dual mode adaptor type + * + * Returns: + * String representation of the DP dual mode adaptor type + */ +const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type) +{ + switch (type) { + case DRM_DP_DUAL_MODE_NONE: + return "none"; + case DRM_DP_DUAL_MODE_TYPE1_DVI: + return "type 1 DVI"; + case DRM_DP_DUAL_MODE_TYPE1_HDMI: + return "type 1 HDMI"; + case DRM_DP_DUAL_MODE_TYPE2_DVI: + return "type 2 DVI"; + case DRM_DP_DUAL_MODE_TYPE2_HDMI: + return "type 2 HDMI"; + case DRM_DP_DUAL_MODE_LSPCON: + return "lspcon"; + default: + WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN); + return "unknown"; + } +} +EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name); + +/** + * drm_lspcon_get_mode: Get LSPCON's current mode of operation by + * reading offset (0x80, 0x41) + * @adapter: I2C-over-aux adapter + * @mode: current lspcon mode of operation output variable + * + * Returns: + * 0 on success, sets the current_mode value to appropriate mode + * -error on failure + */ +int drm_lspcon_get_mode(struct i2c_adapter *adapter, + enum drm_lspcon_mode *mode) +{ + u8 data; + int ret = 0; + int retry; + + if (!mode) { + DRM_ERROR("NULL input\n"); + return -EINVAL; + } + + /* Read Status: i2c over aux */ + for (retry = 0; retry < 6; retry++) { + if (retry) + usleep_range(500, 1000); + + ret = drm_dp_dual_mode_read(adapter, + DP_DUAL_MODE_LSPCON_CURRENT_MODE, + &data, sizeof(data)); + if (!ret) + break; + } + + if (ret < 0) { + DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n"); + return -EFAULT; + } + + if (data & DP_DUAL_MODE_LSPCON_MODE_PCON) + *mode = DRM_LSPCON_MODE_PCON; + else + *mode = DRM_LSPCON_MODE_LS; + return 0; +} +EXPORT_SYMBOL(drm_lspcon_get_mode); + +/** + * drm_lspcon_set_mode: Change LSPCON's mode of operation by + * writing offset (0x80, 0x40) + * @adapter: I2C-over-aux adapter + * @mode: required mode of operation + * + * Returns: + * 0 on success, -error on failure/timeout + */ +int drm_lspcon_set_mode(struct i2c_adapter *adapter, + enum drm_lspcon_mode mode) +{ + u8 data = 0; + int ret; + int time_out = 200; + enum drm_lspcon_mode current_mode; + + if (mode == DRM_LSPCON_MODE_PCON) + data = DP_DUAL_MODE_LSPCON_MODE_PCON; + + /* Change mode */ + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE, + &data, sizeof(data)); + if (ret < 0) { + DRM_ERROR("LSPCON mode change failed\n"); + return ret; + } + + /* + * Confirm mode change by reading the status bit. + * Sometimes, it takes a while to change the mode, + * so wait and retry until time out or done. + */ + do { + ret = drm_lspcon_get_mode(adapter, ¤t_mode); + if (ret) { + DRM_ERROR("can't confirm LSPCON mode change\n"); + return ret; + } else { + if (current_mode != mode) { + msleep(10); + time_out -= 10; + } else { + DRM_DEBUG_KMS("LSPCON mode changed to %s\n", + mode == DRM_LSPCON_MODE_LS ? 
+ "LS" : "PCON"); + return 0; + } + } + } while (time_out); + + DRM_ERROR("LSPCON mode change timed out\n"); + return -ETIMEDOUT; +} +EXPORT_SYMBOL(drm_lspcon_set_mode); Index: sys/dev/drm/core/drm_dp_helper.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_dp_helper.c @@ -0,0 +1,1478 @@ +/* + * Copyright © 2009 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "drm_crtc_helper_internal.h" + +/** + * DOC: dp helpers + * + * These functions contain some common logic and helpers at various abstraction + * levels to deal with Display Port sink devices and related things like DP aux + * channel transfers, EDID reading over DP aux channels, decoding certain DPCD + * blocks, ... + */ + +/* Helpers for DP link training */ +static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_LANE0_1_STATUS + (lane >> 1); + int s = (lane & 1) * 4; + u8 l = dp_link_status(link_status, i); + return (l >> s) & 0xf; +} + +bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + u8 lane_align; + u8 lane_status; + int lane; + + lane_align = dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) + return false; + } + return true; +} +EXPORT_SYMBOL(drm_dp_channel_eq_ok); + +bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + int lane; + u8 lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} +EXPORT_SYMBOL(drm_dp_clock_recovery_ok); + +u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} +EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage); + +u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? + DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} +EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); + +void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { + int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + + if (rd_interval > 4) + DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", + rd_interval); + + if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) + udelay(100); + else + mdelay(rd_interval * 4); +} +EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); + +void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { + int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + + if (rd_interval > 4) + DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", + rd_interval); + + if (rd_interval == 0) + udelay(400); + else + mdelay(rd_interval * 4); +} +EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay); + +u8 drm_dp_link_rate_to_bw_code(int link_rate) +{ + /* Spec says link_bw = link_rate / 0.27Gbps */ + return link_rate / 27000; +} +EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code); + +int drm_dp_bw_code_to_link_rate(u8 link_bw) +{ + /* Spec says link_rate = link_bw * 0.27Gbps */ + return link_bw * 27000; +} +EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); + +#define AUX_RETRY_INTERVAL 500 /* us */ + +static inline void +drm_dp_dump_access(const struct drm_dp_aux *aux, + u8 request, uint offset, void *buffer, int ret) +{ + const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-"; + + if (ret > 0) + DRM_DEBUG_DP("%s: 0x%05x AUX %s (ret=%3d) %*ph\n", + aux->name, offset, arrow, ret, min(ret, 20), buffer); + else + DRM_DEBUG_DP("%s: 0x%05x AUX %s (ret=%3d)\n", + aux->name, offset, arrow, ret); +} + +/** + * DOC: dp helpers + * + * The DisplayPort AUX channel is an abstraction to allow generic, driver- + * independent access to AUX functionality. Drivers can take advantage of + * this by filling in the fields of the drm_dp_aux structure. + * + * Transactions are described using a hardware-independent drm_dp_aux_msg + * structure, which is passed into a driver's .transfer() implementation. + * Both native and I2C-over-AUX transactions are supported. + */ + +static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, + unsigned int offset, void *buffer, size_t size) +{ + struct drm_dp_aux_msg msg; + unsigned int retry, native_reply; + int err = 0, ret = 0; + + memset(&msg, 0, sizeof(msg)); + msg.address = offset; + msg.request = request; + msg.buffer = buffer; + msg.size = size; + + mutex_lock(&aux->hw_mutex); + + /* + * The specification doesn't give any recommendation on how often to + * retry native transactions. We used to retry 7 times like for + * aux i2c transactions, but for real world devices this wasn't + * sufficient; bump to 32 which makes Dell 4k monitors happier.
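+ *
+ * With AUX_RETRY_INTERVAL at 500 us of sleep between attempts, 32
+ * retries bound a persistently failing native transaction to roughly
+ * 16 ms of sleeping plus the transfer attempts themselves.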
+ */ + for (retry = 0; retry < 32; retry++) { + if (ret != 0 && ret != -ETIMEDOUT) { + usleep_range(AUX_RETRY_INTERVAL, + AUX_RETRY_INTERVAL + 100); + } + + ret = aux->transfer(aux, &msg); + + if (ret >= 0) { + native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK; + if (native_reply == DP_AUX_NATIVE_REPLY_ACK) { + if (ret == size) + goto unlock; + + ret = -EPROTO; + } else + ret = -EIO; + } + + /* + * We want the error we return to be the error we received on + * the first transaction, since we may get a different error the + * next time we retry + */ + if (!err) + err = ret; + } + + DRM_DEBUG_KMS("Too many retries, giving up. First error: %d\n", err); + ret = err; + +unlock: + mutex_unlock(&aux->hw_mutex); + return ret; +} + +/** + * drm_dp_dpcd_read() - read a series of bytes from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the (first) register to read + * @buffer: buffer to store the register values + * @size: number of bytes in @buffer + * + * Returns the number of bytes transferred on success, or a negative error + * code on failure. -EIO is returned if the request was NAKed by the sink or + * if the retry count was exceeded. If not all bytes were transferred, this + * function returns -EPROTO. Errors from the underlying AUX channel transfer + * function, with the exception of -EBUSY (which causes the transaction to + * be retried), are propagated to the caller. + */ +ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size) +{ + int ret; + + /* + * HP ZR24w corrupts the first DPCD access after entering power save + * mode. Eg. on a read, the entire buffer will be filled with the same + * byte. Do a throw away read to avoid corrupting anything we care + * about. Afterwards things will work correctly until the monitor + * gets woken up and subsequently re-enters power save mode. + * + * The user pressing any button on the monitor is enough to wake it + * up, so there is no particularly good place to do the workaround. + * We just have to do it before any DPCD access and hope that the + * monitor doesn't power down exactly after the throw away read. + */ + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer, + 1); + if (ret != 1) + goto out; + + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer, + size); + +out: + drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret); + return ret; +} +EXPORT_SYMBOL(drm_dp_dpcd_read); + +/** + * drm_dp_dpcd_write() - write a series of bytes to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the (first) register to write + * @buffer: buffer containing the values to write + * @size: number of bytes in @buffer + * + * Returns the number of bytes transferred on success, or a negative error + * code on failure. -EIO is returned if the request was NAKed by the sink or + * if the retry count was exceeded. If not all bytes were transferred, this + * function returns -EPROTO. Errors from the underlying AUX channel transfer + * function, with the exception of -EBUSY (which causes the transaction to + * be retried), are propagated to the caller. 
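+ *
+ * For example, programming the link rate and lane count (this is what
+ * drm_dp_link_configure() below does; values illustrative):
+ *
+ *	u8 values[2] = { DP_LINK_BW_2_7, 4 };
+ *
+ *	ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));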
+ */ +ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size) +{ + int ret; + + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, + size); + drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret); + return ret; +} +EXPORT_SYMBOL(drm_dp_dpcd_write); + +/** + * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207) + * @aux: DisplayPort AUX channel + * @status: buffer to store the link status in (must be at least 6 bytes) + * + * Returns the number of bytes transferred on success or a negative error + * code on failure. + */ +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, + u8 status[DP_LINK_STATUS_SIZE]) +{ + return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status, + DP_LINK_STATUS_SIZE); +} +EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); + +/** + * drm_dp_link_probe() - probe a DisplayPort link for capabilities + * @aux: DisplayPort AUX channel + * @link: pointer to structure in which to return link capabilities + * + * The structure filled in by this function can usually be passed directly + * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and + * configure the link based on the link's capabilities. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[3]; + int err; + + memset(link, 0, sizeof(*link)); + + err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); + if (err < 0) + return err; + + link->revision = values[0]; + link->rate = drm_dp_bw_code_to_link_rate(values[1]); + link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; + + if (values[2] & DP_ENHANCED_FRAME_CAP) + link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_probe); + +/** + * drm_dp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_up); + +/** + * drm_dp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
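+ *
+ * This is the counterpart of drm_dp_link_power_up(); the usual
+ * bring-up sequence it undoes looks like (illustrative):
+ *
+ *	struct drm_dp_link link;
+ *
+ *	drm_dp_link_probe(aux, &link);
+ *	drm_dp_link_power_up(aux, &link);
+ *	drm_dp_link_configure(aux, &link);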
+ */ +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_down); + +/** + * drm_dp_link_configure() - configure a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[2]; + int err; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_configure); + +/** + * drm_dp_downstream_max_clock() - extract branch device max + * pixel rate for legacy VGA + * converter or max TMDS clock + * rate for others + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * + * Returns max clock in kHz on success or 0 if max clock not defined + */ +int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) +{ + int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; + bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DETAILED_CAP_INFO_AVAILABLE; + + if (!detailed_cap_info) + return 0; + + switch (type) { + case DP_DS_PORT_TYPE_VGA: + return port_cap[1] * 8 * 1000; + case DP_DS_PORT_TYPE_DVI: + case DP_DS_PORT_TYPE_HDMI: + case DP_DS_PORT_TYPE_DP_DUALMODE: + return port_cap[1] * 2500; + default: + return 0; + } +} +EXPORT_SYMBOL(drm_dp_downstream_max_clock); + +/** + * drm_dp_downstream_max_bpc() - extract branch device max + * bits per component + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * + * Returns max bpc on success or 0 if max bpc not defined + */ +int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) +{ + int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; + bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DETAILED_CAP_INFO_AVAILABLE; + int bpc; + + if (!detailed_cap_info) + return 0; + + switch (type) { + case DP_DS_PORT_TYPE_VGA: + case DP_DS_PORT_TYPE_DVI: + case DP_DS_PORT_TYPE_HDMI: + case DP_DS_PORT_TYPE_DP_DUALMODE: + bpc = port_cap[2] & DP_DS_MAX_BPC_MASK; + + switch (bpc) { + case DP_DS_8BPC: + return 8; + case DP_DS_10BPC: + return 10; + case DP_DS_12BPC: + return 12; + case DP_DS_16BPC: + return 16; + } + /* fall through */ + default: + return 0; + } +} +EXPORT_SYMBOL(drm_dp_downstream_max_bpc); + +/** + * drm_dp_downstream_id() - identify branch device + * @aux: DisplayPort AUX channel + * @id: DisplayPort branch device id + * + * Returns branch device id on success or NULL on failure + */ +int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]) +{ + return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6); +} +EXPORT_SYMBOL(drm_dp_downstream_id); + +/** + * drm_dp_downstream_debug() - debug DP branch devices + * @m: pointer for debugfs file + * @dpcd: DisplayPort configuration data + * @port_cap: port 
capabilities + * @aux: DisplayPort AUX channel + * + */ +void drm_dp_downstream_debug(struct seq_file *m, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], struct drm_dp_aux *aux) +{ + bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DETAILED_CAP_INFO_AVAILABLE; + int clk; + int bpc; + char id[7]; + int len; + uint8_t rev[2]; + int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; + bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT; + + seq_printf(m, "\tDP branch device present: %s\n", + branch_device ? "yes" : "no"); + + if (!branch_device) + return; + + switch (type) { + case DP_DS_PORT_TYPE_DP: + seq_puts(m, "\t\tType: DisplayPort\n"); + break; + case DP_DS_PORT_TYPE_VGA: + seq_puts(m, "\t\tType: VGA\n"); + break; + case DP_DS_PORT_TYPE_DVI: + seq_puts(m, "\t\tType: DVI\n"); + break; + case DP_DS_PORT_TYPE_HDMI: + seq_puts(m, "\t\tType: HDMI\n"); + break; + case DP_DS_PORT_TYPE_NON_EDID: + seq_puts(m, "\t\tType: others without EDID support\n"); + break; + case DP_DS_PORT_TYPE_DP_DUALMODE: + seq_puts(m, "\t\tType: DP++\n"); + break; + case DP_DS_PORT_TYPE_WIRELESS: + seq_puts(m, "\t\tType: Wireless\n"); + break; + default: + seq_puts(m, "\t\tType: N/A\n"); + } + + memset(id, 0, sizeof(id)); + drm_dp_downstream_id(aux, id); + seq_printf(m, "\t\tID: %s\n", id); + + len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1); + if (len > 0) + seq_printf(m, "\t\tHW: %d.%d\n", + (rev[0] & 0xf0) >> 4, rev[0] & 0xf); + + len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2); + if (len > 0) + seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); + + if (detailed_cap_info) { + clk = drm_dp_downstream_max_clock(dpcd, port_cap); + + if (clk > 0) { + if (type == DP_DS_PORT_TYPE_VGA) + seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk); + else + seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk); + } + + bpc = drm_dp_downstream_max_bpc(dpcd, port_cap); + + if (bpc > 0) + seq_printf(m, "\t\tMax bpc: %d\n", bpc); + } +} +EXPORT_SYMBOL(drm_dp_downstream_debug); + +/* + * I2C-over-AUX implementation + */ + +static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +static void drm_dp_i2c_msg_write_status_update(struct drm_dp_aux_msg *msg) +{ + /* + * In case of i2c defer or short i2c ack reply to a write, + * we need to switch to WRITE_STATUS_UPDATE to drain the + * rest of the message + */ + if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE) { + msg->request &= DP_AUX_I2C_MOT; + msg->request |= DP_AUX_I2C_WRITE_STATUS_UPDATE; + } +} + +#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */ +#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */ +#define AUX_STOP_LEN 4 +#define AUX_CMD_LEN 4 +#define AUX_ADDRESS_LEN 20 +#define AUX_REPLY_PAD_LEN 4 +#define AUX_LENGTH_LEN 8 + +/* + * Calculate the duration of the AUX request/reply in usec. Gives the + * "best" case estimate, ie. successful while as short as possible. + */ +static int drm_dp_aux_req_duration(const struct drm_dp_aux_msg *msg) +{ + int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN + + AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN; + + if ((msg->request & DP_AUX_I2C_READ) == 0) + len += msg->size * 8; + + return len; +} + +static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg) +{ + int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN + + AUX_CMD_LEN + AUX_REPLY_PAD_LEN; + + /* + * For read we expect what was asked. 
For writes there will + * be 0 or 1 data bytes. Assume 0 for the "best" case. + */ + if (msg->request & DP_AUX_I2C_READ) + len += msg->size * 8; + + return len; +} + +#define I2C_START_LEN 1 +#define I2C_STOP_LEN 1 +#define I2C_ADDR_LEN 9 /* ADDRESS + R/W + ACK/NACK */ +#define I2C_DATA_LEN 9 /* DATA + ACK/NACK */ + +/* + * Calculate the length of the i2c transfer in usec, assuming + * the i2c bus speed is as specified. Gives the "worst" + * case estimate, ie. successful while as long as possible. + * Doesn't account for the "MOT" bit, and instead assumes each + * message includes a START, ADDRESS and STOP. Neither does it + * account for additional random variables such as clock stretching. + */ +static int drm_dp_i2c_msg_duration(const struct drm_dp_aux_msg *msg, + int i2c_speed_khz) +{ + /* AUX bitrate is 1MHz, i2c bitrate as specified */ + return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN + + msg->size * I2C_DATA_LEN + + I2C_STOP_LEN) * 1000, i2c_speed_khz); +} + +/* + * Determine how many retries should be attempted to successfully transfer + * the specified message, based on the estimated durations of the + * i2c and AUX transfers. + */ +static int drm_dp_i2c_retry_count(const struct drm_dp_aux_msg *msg, + int i2c_speed_khz) +{ + int aux_time_us = drm_dp_aux_req_duration(msg) + + drm_dp_aux_reply_duration(msg); + int i2c_time_us = drm_dp_i2c_msg_duration(msg, i2c_speed_khz); + + return DIV_ROUND_UP(i2c_time_us, aux_time_us + AUX_RETRY_INTERVAL); +} + +/* + * FIXME currently assumes 10 kHz as some real world devices seem + * to require it. We should query/set the speed via DPCD if supported. + */ +static int dp_aux_i2c_speed_khz __read_mostly = 10; +module_param_unsafe(dp_aux_i2c_speed_khz, int, 0644); +MODULE_PARM_DESC(dp_aux_i2c_speed_khz, + "Assumed speed of the i2c bus in kHz, (1-400, default 10)"); + +/* + * Transfer a single I2C-over-AUX message and handle various error conditions, + * retrying the transaction as appropriate. It is assumed that the + * &drm_dp_aux.transfer function does not modify anything in the msg other than the + * reply field. + * + * Returns bytes transferred on success, or a negative error code on failure. + */ +static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + unsigned int retry, defer_i2c; + int ret; + /* + * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device + * is required to retry at least seven times upon receiving AUX_DEFER + * before giving up the AUX transaction. + * + * We also try to account for the i2c bus speed. + */ + int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz)); + + for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) { + ret = aux->transfer(aux, msg); + if (ret < 0) { + if (ret == -EBUSY) + continue; + + /* + * While timeouts can be errors, they're usually normal + * behavior (for instance, when a driver tries to + * communicate with a non-existent DisplayPort device). + * Avoid spamming the kernel log with timeout errors. + */ + if (ret == -ETIMEDOUT) + DRM_DEBUG_KMS_RATELIMITED("transaction timed out\n"); + else + DRM_DEBUG_KMS("transaction failed: %d\n", ret); + + return ret; + } + + switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) { + case DP_AUX_NATIVE_REPLY_ACK: + /* + * For I2C-over-AUX transactions this isn't enough, we + * need to check for the I2C ACK reply.
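+ * A short native ACK (ret < msg->size) also ends up here; the
+ * remaining bytes are resubmitted by drm_dp_i2c_drain_msg().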
+ */ + break; + + case DP_AUX_NATIVE_REPLY_NACK: + DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size); + return -EREMOTEIO; + + case DP_AUX_NATIVE_REPLY_DEFER: + DRM_DEBUG_KMS("native defer\n"); + /* + * We could check for I2C bit rate capabilities and if + * available adjust this interval. We could also be + * more careful with DP-to-legacy adapters where a + * long legacy cable may force very low I2C bit rates. + * + * For now just defer for long enough to hopefully be + * safe for all use-cases. + */ + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); + continue; + + default: + DRM_ERROR("invalid native reply %#04x\n", msg->reply); + return -EREMOTEIO; + } + + switch (msg->reply & DP_AUX_I2C_REPLY_MASK) { + case DP_AUX_I2C_REPLY_ACK: + /* + * Both native ACK and I2C ACK replies received. We + * can assume the transfer was successful. + */ + if (ret != msg->size) + drm_dp_i2c_msg_write_status_update(msg); + return ret; + + case DP_AUX_I2C_REPLY_NACK: + DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu)\n", + ret, msg->size); + aux->i2c_nack_count++; + return -EREMOTEIO; + + case DP_AUX_I2C_REPLY_DEFER: + DRM_DEBUG_KMS("I2C defer\n"); + /* DP Compliance Test 4.2.2.5 Requirement: + * Must have at least 7 retries for I2C defers on the + * transaction to pass this test + */ + aux->i2c_defer_count++; + if (defer_i2c < 7) + defer_i2c++; + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); + drm_dp_i2c_msg_write_status_update(msg); + + continue; + + default: + DRM_ERROR("invalid I2C reply %#04x\n", msg->reply); + return -EREMOTEIO; + } + } + + DRM_DEBUG_KMS("too many retries, giving up\n"); + return -EREMOTEIO; +} + +static void drm_dp_i2c_msg_set_request(struct drm_dp_aux_msg *msg, + const struct i2c_msg *i2c_msg) +{ + msg->request = (i2c_msg->flags & I2C_M_RD) ? + DP_AUX_I2C_READ : DP_AUX_I2C_WRITE; + if (!(i2c_msg->flags & I2C_M_STOP)) + msg->request |= DP_AUX_I2C_MOT; +} + +/* + * Keep retrying drm_dp_i2c_do_msg until all data has been transferred. + * + * Returns an error code on failure, or a recommended transfer size on success. + */ +static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg) +{ + int err, ret = orig_msg->size; + struct drm_dp_aux_msg msg = *orig_msg; + + while (msg.size > 0) { + err = drm_dp_i2c_do_msg(aux, &msg); + if (err <= 0) + return err == 0 ? -EPROTO : err; + + if (err < msg.size && err < ret) { + DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n", + msg.size, err); + ret = err; + } + + msg.size -= err; + msg.buffer += err; + } + + return ret; +} + +/* + * Bizlink designed DP->DVI-D Dual Link adapters require the I2C over AUX + * packets to be as large as possible. If not, the I2C transactions never + * succeed. Hence the default is maximum. 
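+ *
+ * A single I2C-over-AUX message carries at most
+ * DP_AUX_MAX_PAYLOAD_BYTES (16) data bytes, so at the default size a
+ * 128-byte EDID block read costs eight data transactions (plus the
+ * bare address packets framing the transfer).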
+ */ +static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES; +module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644); +MODULE_PARM_DESC(dp_aux_i2c_transfer_size, + "Number of bytes to transfer in a single I2C over DP AUX CH message, (1-16, default 16)"); + +static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, + int num) +{ + struct drm_dp_aux *aux = adapter->algo_data; + unsigned int i, j; + unsigned transfer_size; + struct drm_dp_aux_msg msg; + int err = 0; + + dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES); + + memset(&msg, 0, sizeof(msg)); + + for (i = 0; i < num; i++) { + msg.address = msgs[i].addr; + drm_dp_i2c_msg_set_request(&msg, &msgs[i]); + /* Send a bare address packet to start the transaction. + * Zero sized messages specify an address only (bare + * address) transaction. + */ + msg.buffer = NULL; + msg.size = 0; + err = drm_dp_i2c_do_msg(aux, &msg); + + /* + * Reset msg.request in case it got + * changed into a WRITE_STATUS_UPDATE. + */ + drm_dp_i2c_msg_set_request(&msg, &msgs[i]); + + if (err < 0) + break; + /* We want each transaction to be as large as possible, but + * we'll go to smaller sizes if the hardware gives us a + * short reply. + */ + transfer_size = dp_aux_i2c_transfer_size; + for (j = 0; j < msgs[i].len; j += msg.size) { + msg.buffer = msgs[i].buf + j; + msg.size = min(transfer_size, msgs[i].len - j); + + err = drm_dp_i2c_drain_msg(aux, &msg); + + /* + * Reset msg.request in case it got + * changed into a WRITE_STATUS_UPDATE. + */ + drm_dp_i2c_msg_set_request(&msg, &msgs[i]); + + if (err < 0) + break; + transfer_size = err; + } + if (err < 0) + break; + } + if (err >= 0) + err = num; + /* Send a bare address packet to close out the transaction. + * Zero sized messages specify an address only (bare + * address) transaction. + */ + msg.request &= ~DP_AUX_I2C_MOT; + msg.buffer = NULL; + msg.size = 0; + (void)drm_dp_i2c_do_msg(aux, &msg); + + return err; +} + +static const struct i2c_algorithm drm_dp_i2c_algo = { + .functionality = drm_dp_i2c_functionality, + .master_xfer = drm_dp_i2c_xfer, +}; + +static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c) +{ + return container_of(i2c, struct drm_dp_aux, ddc); +} + +static void lock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + mutex_lock(&i2c_to_aux(i2c)->hw_mutex); +} + +static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex); +} + +static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + mutex_unlock(&i2c_to_aux(i2c)->hw_mutex); +} + +static const struct i2c_lock_operations drm_dp_i2c_lock_ops = { + .lock_bus = lock_bus, + .trylock_bus = trylock_bus, + .unlock_bus = unlock_bus, +}; + +static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc) +{ + u8 buf, count; + int ret; + + ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + if (ret < 0) + return ret; + + WARN_ON(!(buf & DP_TEST_SINK_START)); + + ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf); + if (ret < 0) + return ret; + + count = buf & DP_TEST_COUNT_MASK; + if (count == aux->crc_count) + return -EAGAIN; /* No CRC yet */ + + aux->crc_count = count; + + /* + * At DP_TEST_CRC_R_CR, there are 6 bytes containing CRC data, 2 bytes + * per component (RGB or CrYCb).
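+ * They come back as three little-endian 16-bit values, e.g.
+ * crc[0] | crc[1] << 8 for R/Cr, which is exactly how
+ * drm_dp_aux_crc_work() below reassembles them.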
+ */ + ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6); + if (ret < 0) + return ret; + + return 0; +} + +static void drm_dp_aux_crc_work(struct work_struct *work) +{ + struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux, + crc_work); + struct drm_crtc *crtc; + u8 crc_bytes[6]; + uint32_t crcs[3]; + int ret; + + if (WARN_ON(!aux->crtc)) + return; + + crtc = aux->crtc; + while (crtc->crc.opened) { + drm_crtc_wait_one_vblank(crtc); + if (!crtc->crc.opened) + break; + + ret = drm_dp_aux_get_crc(aux, crc_bytes); + if (ret == -EAGAIN) { + usleep_range(1000, 2000); + ret = drm_dp_aux_get_crc(aux, crc_bytes); + } + + if (ret == -EAGAIN) { + DRM_DEBUG_KMS("Get CRC failed after retrying: %d\n", + ret); + continue; + } else if (ret) { + DRM_DEBUG_KMS("Failed to get a CRC: %d\n", ret); + continue; + } + + crcs[0] = crc_bytes[0] | crc_bytes[1] << 8; + crcs[1] = crc_bytes[2] | crc_bytes[3] << 8; + crcs[2] = crc_bytes[4] | crc_bytes[5] << 8; + drm_crtc_add_crc_entry(crtc, false, 0, crcs); + } +} + +/** + * drm_dp_aux_init() - minimally initialise an aux channel + * @aux: DisplayPort AUX channel + * + * If you need to use the drm_dp_aux's i2c adapter prior to registering it + * with the outside world, call drm_dp_aux_init() first. You must still + * call drm_dp_aux_register() once the connector has been registered to + * allow userspace access to the auxiliary DP channel. + */ +void drm_dp_aux_init(struct drm_dp_aux *aux) +{ + mutex_init(&aux->hw_mutex); + mutex_init(&aux->cec.lock); + INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work); + + aux->ddc.algo = &drm_dp_i2c_algo; + aux->ddc.algo_data = aux; + aux->ddc.retries = 3; + + aux->ddc.lock_ops = &drm_dp_i2c_lock_ops; +} +EXPORT_SYMBOL(drm_dp_aux_init); + +/** + * drm_dp_aux_register() - initialise and register aux channel + * @aux: DisplayPort AUX channel + * + * Automatically calls drm_dp_aux_init() if this hasn't been done yet. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_aux_register(struct drm_dp_aux *aux) +{ + int ret; + + if (!aux->ddc.algo) + drm_dp_aux_init(aux); + + aux->ddc.class = I2C_CLASS_DDC; + aux->ddc.owner = THIS_MODULE; + aux->ddc.dev.parent = aux->dev; + + strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), + sizeof(aux->ddc.name)); + + ret = drm_dp_aux_register_devnode(aux); + if (ret) + return ret; + + ret = i2c_add_adapter(&aux->ddc); + if (ret) { + drm_dp_aux_unregister_devnode(aux); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_aux_register); + +/** + * drm_dp_aux_unregister() - unregister an AUX adapter + * @aux: DisplayPort AUX channel + */ +void drm_dp_aux_unregister(struct drm_dp_aux *aux) +{ + drm_dp_aux_unregister_devnode(aux); + i2c_del_adapter(&aux->ddc); +} +EXPORT_SYMBOL(drm_dp_aux_unregister); + +#define PSR_SETUP_TIME(x) [DP_PSR_SETUP_TIME_ ## x >> DP_PSR_SETUP_TIME_SHIFT] = (x) + +/** + * drm_dp_psr_setup_time() - PSR setup in time usec + * @psr_cap: PSR capabilities from DPCD + * + * Returns: + * PSR setup time for the panel in microseconds, negative + * error code on failure. 
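+ *
+ * For example (illustrative; @psr_cap is typically cached from the
+ * DP_PSR_SUPPORT block at connector probe time):
+ *
+ *	u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE];
+ *	int setup_us;
+ *
+ *	drm_dp_dpcd_read(aux, DP_PSR_SUPPORT, psr_cap, sizeof(psr_cap));
+ *	setup_us = drm_dp_psr_setup_time(psr_cap);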
+ */ +int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]) +{ + static const u16 psr_setup_time_us[] = { + PSR_SETUP_TIME(330), + PSR_SETUP_TIME(275), + PSR_SETUP_TIME(220), + PSR_SETUP_TIME(165), + PSR_SETUP_TIME(110), + PSR_SETUP_TIME(55), + PSR_SETUP_TIME(0), + }; + int i; + + i = (psr_cap[1] & DP_PSR_SETUP_TIME_MASK) >> DP_PSR_SETUP_TIME_SHIFT; + if (i >= ARRAY_SIZE(psr_setup_time_us)) + return -EINVAL; + + return psr_setup_time_us[i]; +} +EXPORT_SYMBOL(drm_dp_psr_setup_time); + +#undef PSR_SETUP_TIME + +/** + * drm_dp_start_crc() - start capture of frame CRCs + * @aux: DisplayPort AUX channel + * @crtc: CRTC displaying the frames whose CRCs are to be captured + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc) +{ + u8 buf; + int ret; + + ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + if (ret < 0) + return ret; + + ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START); + if (ret < 0) + return ret; + + aux->crc_count = 0; + aux->crtc = crtc; + schedule_work(&aux->crc_work); + + return 0; +} +EXPORT_SYMBOL(drm_dp_start_crc); + +/** + * drm_dp_stop_crc() - stop capture of frame CRCs + * @aux: DisplayPort AUX channel + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_stop_crc(struct drm_dp_aux *aux) +{ + u8 buf; + int ret; + + ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + if (ret < 0) + return ret; + + ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START); + if (ret < 0) + return ret; + + flush_work(&aux->crc_work); + aux->crtc = NULL; + + return 0; +} +EXPORT_SYMBOL(drm_dp_stop_crc); + +struct dpcd_quirk { + u8 oui[3]; + u8 device_id[6]; + bool is_branch; + u32 quirks; +}; + +#define OUI(first, second, third) { (first), (second), (third) } +#define DEVICE_ID(first, second, third, fourth, fifth, sixth) \ + { (first), (second), (third), (fourth), (fifth), (sixth) } + +#define DEVICE_ID_ANY DEVICE_ID(0, 0, 0, 0, 0, 0) + +static const struct dpcd_quirk dpcd_quirk_list[] = { + /* Analogix 7737 needs reduced M and N at HBR2 link rates */ + { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, + /* LG LP140WF6-SPM1 eDP panel */ + { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, + /* Apple panels need some additional handling to support PSR */ + { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }, + /* CH7511 seems to leave SINK_COUNT zeroed */ + { OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) }, +}; + +#undef OUI + +/* + * Get a bit mask of DPCD quirks for the sink/branch device identified by + * ident. The quirk data is shared but it's up to the drivers to act on the + * data. + * + * For now, only the OUI (first three bytes) is used, but this may be extended + * to device identification string and hardware/firmware revisions later. 
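+ *
+ * Drivers test the resulting mask with the drm_dp_has_quirk() helper
+ * from the header, e.g. (illustrative):
+ *
+ *	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_CONSTANT_N))
+ *		constant_n = true;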
+ */ +static u32 +drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) +{ + const struct dpcd_quirk *quirk; + u32 quirks = 0; + int i; + u8 any_device[] = DEVICE_ID_ANY; + + for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) { + quirk = &dpcd_quirk_list[i]; + + if (quirk->is_branch != is_branch) + continue; + + if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0) + continue; + + if (memcmp(quirk->device_id, any_device, sizeof(any_device)) != 0 && + memcmp(quirk->device_id, ident->device_id, sizeof(ident->device_id)) != 0) + continue; + + quirks |= quirk->quirks; + } + + return quirks; +} + +#undef DEVICE_ID_ANY +#undef DEVICE_ID + +/** + * drm_dp_read_desc - read sink/branch descriptor from DPCD + * @aux: DisplayPort AUX channel + * @desc: Device descriptor to fill from DPCD + * @is_branch: true for branch devices, false for sink devices + * + * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the + * identification. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, + bool is_branch) +{ + struct drm_dp_dpcd_ident *ident = &desc->ident; + unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI; + int ret, dev_id_len; + + ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); + if (ret < 0) + return ret; + + desc->quirks = drm_dp_get_quirks(ident, is_branch); + + dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id)); + + DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n", + is_branch ? "branch" : "sink", + (int)sizeof(ident->oui), ident->oui, + dev_id_len, ident->device_id, + ident->hw_rev >> 4, ident->hw_rev & 0xf, + ident->sw_major_rev, ident->sw_minor_rev, + desc->quirks); + + return 0; +} +EXPORT_SYMBOL(drm_dp_read_desc); + +/** + * drm_dp_dsc_sink_max_slice_count() - Get the max slice count + * supported by the DSC sink. + * @dsc_dpcd: DSC capabilities from DPCD + * @is_edp: true if it's eDP, false for DP + * + * Read the slice capabilities DPCD register from DSC sink to get + * the maximum slice count supported. This is used to populate + * the DSC parameters in the &struct drm_dsc_config by the driver. + * Driver creates an infoframe using these parameters to populate + * &struct drm_dsc_pps_infoframe.
These are sent to the sink in a DSC + * infoframe packed with the helper function drm_dsc_pps_infoframe_pack(). + * + * Returns: + * Maximum slice count supported by DSC sink or 0 if invalid + */ +u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + bool is_edp) +{ + u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT]; + + if (is_edp) { + /* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */ + if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK) + return 4; + if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK) + return 2; + if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK) + return 1; + } else { + /* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP2 */ + u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT]; + + if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK) + return 24; + if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK) + return 20; + if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK) + return 16; + if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK) + return 12; + if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK) + return 10; + if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK) + return 8; + if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK) + return 6; + if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK) + return 4; + if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK) + return 2; + if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK) + return 1; + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count); + +/** + * drm_dp_dsc_sink_line_buf_depth() - Get the line buffer depth in bits + * @dsc_dpcd: DSC capabilities from DPCD + * + * Read the DSC DPCD register to parse the line buffer depth in bits, which is + * the number of bits of precision within the decoder line buffer supported by + * the DSC sink. This is used to populate the DSC parameters in the + * &struct drm_dsc_config by the driver. + * Driver creates an infoframe using these parameters to populate + * &struct drm_dsc_pps_infoframe. These are sent to the sink in a DSC + * infoframe packed with the helper function drm_dsc_pps_infoframe_pack(). + * + * Returns: + * Line buffer depth supported by DSC panel or 0 if invalid + */ +u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT]; + + switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) { + case DP_DSC_LINE_BUF_BIT_DEPTH_9: + return 9; + case DP_DSC_LINE_BUF_BIT_DEPTH_10: + return 10; + case DP_DSC_LINE_BUF_BIT_DEPTH_11: + return 11; + case DP_DSC_LINE_BUF_BIT_DEPTH_12: + return 12; + case DP_DSC_LINE_BUF_BIT_DEPTH_13: + return 13; + case DP_DSC_LINE_BUF_BIT_DEPTH_14: + return 14; + case DP_DSC_LINE_BUF_BIT_DEPTH_15: + return 15; + case DP_DSC_LINE_BUF_BIT_DEPTH_16: + return 16; + case DP_DSC_LINE_BUF_BIT_DEPTH_8: + return 8; + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth); + +/** + * drm_dp_dsc_sink_supported_input_bpcs() - Get all the input bits per component + * values supported by the DSC sink. + * @dsc_dpcd: DSC capabilities from DPCD + * @dsc_bpc: An array to be filled by this helper with supported + * input bpcs. + * + * Read the DSC DPCD from the sink device to parse the supported bits per + * component values. This is used to populate the DSC parameters + * in the &struct drm_dsc_config by the driver. + * Driver creates an infoframe using these parameters to populate + * &struct drm_dsc_pps_infoframe.
These are sent to the sink in a DSC + * infoframe packed with the helper function drm_dsc_pps_infoframe_pack(). + * + * Returns: + * Number of input BPC values parsed from the DPCD + */ +int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + u8 dsc_bpc[3]) +{ + int num_bpc = 0; + u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT]; + + if (color_depth & DP_DSC_12_BPC) + dsc_bpc[num_bpc++] = 12; + if (color_depth & DP_DSC_10_BPC) + dsc_bpc[num_bpc++] = 10; + if (color_depth & DP_DSC_8_BPC) + dsc_bpc[num_bpc++] = 8; + + return num_bpc; +} +EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs); Index: sys/dev/drm/core/drm_dp_mst_topology.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_dp_mst_topology.c @@ -0,0 +1,4140 @@ +/* + * Copyright © 2014 Red Hat + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "drm_crtc_helper_internal.h" + +/** + * DOC: dp mst helper + * + * These functions contain parts of the DisplayPort 1.2a MultiStream Transport + * protocol. The helpers contain a topology manager and bandwidth manager. + * The helpers encapsulate the sending and receiving of sideband msgs.
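+ *
+ * A driver typically embeds a &struct drm_dp_mst_topology_mgr and
+ * initializes it once its AUX channel is registered, e.g.
+ * (illustrative values for max DPCD transaction size, payload count
+ * and connector id base):
+ *
+ *	drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev, &dp->aux,
+ *				     16, 3, conn_base_id);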
+ */ +static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, + char *buf); +static int test_calc_pbn_mode(void); + +static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port); + +static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload); + +static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes); +static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes); + +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); +static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port); +static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, + u8 *guid); + +static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux); +static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux); +static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr); + +#define DP_STR(x) [DP_ ## x] = #x + +static const char *drm_dp_mst_req_type_str(u8 req_type) +{ + static const char * const req_type_str[] = { + DP_STR(GET_MSG_TRANSACTION_VERSION), + DP_STR(LINK_ADDRESS), + DP_STR(CONNECTION_STATUS_NOTIFY), + DP_STR(ENUM_PATH_RESOURCES), + DP_STR(ALLOCATE_PAYLOAD), + DP_STR(QUERY_PAYLOAD), + DP_STR(RESOURCE_STATUS_NOTIFY), + DP_STR(CLEAR_PAYLOAD_ID_TABLE), + DP_STR(REMOTE_DPCD_READ), + DP_STR(REMOTE_DPCD_WRITE), + DP_STR(REMOTE_I2C_READ), + DP_STR(REMOTE_I2C_WRITE), + DP_STR(POWER_UP_PHY), + DP_STR(POWER_DOWN_PHY), + DP_STR(SINK_EVENT_NOTIFY), + DP_STR(QUERY_STREAM_ENC_STATUS), + }; + + if (req_type >= ARRAY_SIZE(req_type_str) || + !req_type_str[req_type]) + return "unknown"; + + return req_type_str[req_type]; +} + +#undef DP_STR +#define DP_STR(x) [DP_NAK_ ## x] = #x + +static const char *drm_dp_mst_nak_reason_str(u8 nak_reason) +{ + static const char * const nak_reason_str[] = { + DP_STR(WRITE_FAILURE), + DP_STR(INVALID_READ), + DP_STR(CRC_FAILURE), + DP_STR(BAD_PARAM), + DP_STR(DEFER), + DP_STR(LINK_FAILURE), + DP_STR(NO_RESOURCES), + DP_STR(DPCD_FAIL), + DP_STR(I2C_NAK), + DP_STR(ALLOCATE_FAIL), + }; + + if (nak_reason >= ARRAY_SIZE(nak_reason_str) || + !nak_reason_str[nak_reason]) + return "unknown"; + + return nak_reason_str[nak_reason]; +} + +#undef DP_STR + +/* sideband msg handling */ +static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles) +{ + u8 bitmask = 0x80; + u8 bitshift = 7; + u8 array_index = 0; + int number_of_bits = num_nibbles * 4; + u8 remainder = 0; + + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + remainder |= (data[array_index] & bitmask) >> bitshift; + bitmask >>= 1; + bitshift--; + if (bitmask == 0) { + bitmask = 0x80; + bitshift = 7; + array_index++; + } + if ((remainder & 0x10) == 0x10) + remainder ^= 0x13; + } + + number_of_bits = 4; + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + if ((remainder & 0x10) != 0) + remainder ^= 0x13; + } + + return remainder; +} + +static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes) +{ + u8 bitmask = 0x80; + u8 bitshift = 7; + u8 array_index = 0; + int number_of_bits = number_of_bytes * 8; + u16 remainder = 0; + + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + remainder |= (data[array_index] & bitmask) >> bitshift; + bitmask >>= 1; + bitshift--; + if 
(bitmask == 0) { + bitmask = 0x80; + bitshift = 7; + array_index++; + } + if ((remainder & 0x100) == 0x100) + remainder ^= 0xd5; + } + + number_of_bits = 8; + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + if ((remainder & 0x100) != 0) + remainder ^= 0xd5; + } + + return remainder & 0xff; +} +static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr) +{ + u8 size = 3; + size += (hdr->lct / 2); + return size; +} + +static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, + u8 *buf, int *len) +{ + int idx = 0; + int i; + u8 crc4; + buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); + for (i = 0; i < (hdr->lct / 2); i++) + buf[idx++] = hdr->rad[i]; + buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | + (hdr->msg_len & 0x3f); + buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); + + crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); + buf[idx - 1] |= (crc4 & 0xf); + + *len = idx; +} + +static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, + u8 *buf, int buflen, u8 *hdrlen) +{ + u8 crc4; + u8 len; + int i; + u8 idx; + if (buf[0] == 0) + return false; + len = 3; + len += ((buf[0] & 0xf0) >> 4) / 2; + if (len > buflen) + return false; + crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); + + if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { + DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); + return false; + } + + hdr->lct = (buf[0] & 0xf0) >> 4; + hdr->lcr = (buf[0] & 0xf); + idx = 1; + for (i = 0; i < (hdr->lct / 2); i++) + hdr->rad[i] = buf[idx++]; + hdr->broadcast = (buf[idx] >> 7) & 0x1; + hdr->path_msg = (buf[idx] >> 6) & 0x1; + hdr->msg_len = buf[idx] & 0x3f; + idx++; + hdr->somt = (buf[idx] >> 7) & 0x1; + hdr->eomt = (buf[idx] >> 6) & 0x1; + hdr->seqno = (buf[idx] >> 4) & 0x1; + idx++; + *hdrlen = idx; + return true; +} + +static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, + struct drm_dp_sideband_msg_tx *raw) +{ + int idx = 0; + int i; + u8 *buf = raw->msg; + buf[idx++] = req->req_type & 0x7f; + + switch (req->req_type) { + case DP_ENUM_PATH_RESOURCES: + buf[idx] = (req->u.port_num.port_number & 0xf) << 4; + idx++; + break; + case DP_ALLOCATE_PAYLOAD: + buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | + (req->u.allocate_payload.number_sdp_streams & 0xf); + idx++; + buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); + idx++; + buf[idx] = (req->u.allocate_payload.pbn >> 8); + idx++; + buf[idx] = (req->u.allocate_payload.pbn & 0xff); + idx++; + for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { + buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | + (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); + idx++; + } + if (req->u.allocate_payload.number_sdp_streams & 1) { + i = req->u.allocate_payload.number_sdp_streams - 1; + buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; + idx++; + } + break; + case DP_QUERY_PAYLOAD: + buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; + idx++; + buf[idx] = (req->u.query_payload.vcpi & 0x7f); + idx++; + break; + case DP_REMOTE_DPCD_READ: + buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; + buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; + idx++; + buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; + idx++; + buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); + idx++; + buf[idx] = (req->u.dpcd_read.num_bytes); + idx++; + break; + + case DP_REMOTE_DPCD_WRITE: + buf[idx] = 
(req->u.dpcd_write.port_number & 0xf) << 4; + buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; + idx++; + buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; + idx++; + buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); + idx++; + buf[idx] = (req->u.dpcd_write.num_bytes); + idx++; + memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); + idx += req->u.dpcd_write.num_bytes; + break; + case DP_REMOTE_I2C_READ: + buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; + buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); + idx++; + for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { + buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; + idx++; + buf[idx] = req->u.i2c_read.transactions[i].num_bytes; + idx++; + memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); + idx += req->u.i2c_read.transactions[i].num_bytes; + + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); + idx++; + } + buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; + idx++; + buf[idx] = (req->u.i2c_read.num_bytes_read); + idx++; + break; + + case DP_REMOTE_I2C_WRITE: + buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; + idx++; + buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; + idx++; + buf[idx] = (req->u.i2c_write.num_bytes); + idx++; + memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); + idx += req->u.i2c_write.num_bytes; + break; + + case DP_POWER_DOWN_PHY: + case DP_POWER_UP_PHY: + buf[idx] = (req->u.port_num.port_number & 0xf) << 4; + idx++; + break; + } + raw->cur_len = idx; +} + +static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) +{ + u8 crc4; + crc4 = drm_dp_msg_data_crc4(msg, len); + msg[len] = crc4; +} + +static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep, + struct drm_dp_sideband_msg_tx *raw) +{ + int idx = 0; + u8 *buf = raw->msg; + + buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); + + raw->cur_len = idx; +} + +/* this adds a chunk of msg to the builder to get the final msg */ +static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, + u8 *replybuf, u8 replybuflen, bool hdr) +{ + int ret; + u8 crc4; + + if (hdr) { + u8 hdrlen; + struct drm_dp_sideband_msg_hdr recv_hdr; + ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen); + if (ret == false) { + print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false); + return false; + } + + /* + * ignore out-of-order messages or messages that are part of a + * failed transaction + */ + if (!recv_hdr.somt && !msg->have_somt) + return false; + + /* get length contained in this portion */ + msg->curchunk_len = recv_hdr.msg_len; + msg->curchunk_hdrlen = hdrlen; + + /* we have already gotten an somt - don't bother parsing */ + if (recv_hdr.somt && msg->have_somt) + return false; + + if (recv_hdr.somt) { + memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); + msg->have_somt = true; + } + if (recv_hdr.eomt) + msg->have_eomt = true; + + /* copy the bytes for the remainder of this header chunk */ + msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen)); + memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); + } else { + memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); + msg->curchunk_idx += replybuflen; + } + + if (msg->curchunk_idx 
>= msg->curchunk_len) { + /* do CRC */ + crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); + /* copy chunk into bigger msg */ + memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); + msg->curlen += msg->curchunk_len - 1; + } + return true; +} + +static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + int i; + memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); + idx += 16; + repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + for (i = 0; i < repmsg->u.link_addr.nports; i++) { + if (raw->msg[idx] & 0x80) + repmsg->u.link_addr.ports[i].input_port = 1; + + repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; + repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); + + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; + repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; + if (repmsg->u.link_addr.ports[i].input_port == 0) + repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; + idx++; + if (idx > raw->curlen) + goto fail_len; + if (repmsg->u.link_addr.ports[i].input_port == 0) { + repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); + idx++; + if (idx > raw->curlen) + goto fail_len; + memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; + repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); + idx++; + + } + if (idx > raw->curlen) + goto fail_len; + } + + return true; +fail_len: + DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; + idx++; + if (idx > raw->curlen) + goto fail_len; + + memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); + return true; +fail_len: + DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + + repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; + idx++; + /* TODO check */ + memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); + return true; +fail_len: + DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool 
drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.allocate_payload.vcpi = raw->msg[idx]; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + + repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) { + DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n", + idx, raw->curlen); + return false; + } + return true; +} + +static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *msg) +{ + memset(msg, 0, sizeof(*msg)); + msg->reply_type = (raw->msg[0] & 0x80) >> 7; + msg->req_type = (raw->msg[0] & 0x7f); + + if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) { + memcpy(msg->u.nak.guid, &raw->msg[1], 16); + msg->u.nak.reason = raw->msg[17]; + msg->u.nak.nak_data = raw->msg[18]; + return false; + } + + switch (msg->req_type) { + case DP_LINK_ADDRESS: + return drm_dp_sideband_parse_link_address(raw, msg); + case DP_QUERY_PAYLOAD: + return drm_dp_sideband_parse_query_payload_ack(raw, msg); + case DP_REMOTE_DPCD_READ: + return drm_dp_sideband_parse_remote_dpcd_read(raw, msg); + case DP_REMOTE_DPCD_WRITE: + return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); + case DP_REMOTE_I2C_READ: + return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); + case DP_ENUM_PATH_RESOURCES: + return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); + case DP_ALLOCATE_PAYLOAD: + return drm_dp_sideband_parse_allocate_payload_ack(raw, msg); + case DP_POWER_DOWN_PHY: + case DP_POWER_UP_PHY: + return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg); + default: + DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type, + 
drm_dp_mst_req_type_str(msg->req_type)); + return false; + } +} + +static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + int idx = 1; + + msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; + idx++; + if (idx > raw->curlen) + goto fail_len; + + memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + + msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; + msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; + msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; + msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; + msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); + idx++; + return true; +fail_len: + DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + int idx = 1; + + msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; + idx++; + if (idx > raw->curlen) + goto fail_len; + + memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + + msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); + idx++; + return true; +fail_len: + DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + memset(msg, 0, sizeof(*msg)); + msg->req_type = (raw->msg[0] & 0x7f); + + switch (msg->req_type) { + case DP_CONNECTION_STATUS_NOTIFY: + return drm_dp_sideband_parse_connection_status_notify(raw, msg); + case DP_RESOURCE_STATUS_NOTIFY: + return drm_dp_sideband_parse_resource_status_notify(raw, msg); + default: + DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type, + drm_dp_mst_req_type_str(msg->req_type)); + return false; + } +} + +static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_REMOTE_DPCD_WRITE; + req.u.dpcd_write.port_number = port_num; + req.u.dpcd_write.dpcd_address = offset; + req.u.dpcd_write.num_bytes = num_bytes; + req.u.dpcd_write.bytes = bytes; + drm_dp_encode_sideband_req(&req, msg); + + return 0; +} + +static int build_link_address(struct drm_dp_sideband_msg_tx *msg) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_LINK_ADDRESS; + drm_dp_encode_sideband_req(&req, msg); + return 0; +} + +static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_ENUM_PATH_RESOURCES; + req.u.port_num.port_number = port_num; + drm_dp_encode_sideband_req(&req, msg); + msg->path_msg = true; + return 0; +} + +static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, + u8 vcpi, uint16_t pbn, + u8 number_sdp_streams, + u8 *sdp_stream_sink) +{ + struct drm_dp_sideband_msg_req_body req; + memset(&req, 0, sizeof(req)); + req.req_type = DP_ALLOCATE_PAYLOAD; + req.u.allocate_payload.port_number = port_num; + req.u.allocate_payload.vcpi = vcpi; + req.u.allocate_payload.pbn = pbn; + req.u.allocate_payload.number_sdp_streams = number_sdp_streams; + 
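+ /* one sink ID byte per stream here; drm_dp_encode_sideband_req() packs two 4-bit IDs per byte on the wire */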
memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink, + number_sdp_streams); + drm_dp_encode_sideband_req(&req, msg); + msg->path_msg = true; + return 0; +} + +static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg, + int port_num, bool power_up) +{ + struct drm_dp_sideband_msg_req_body req; + + if (power_up) + req.req_type = DP_POWER_UP_PHY; + else + req.req_type = DP_POWER_DOWN_PHY; + + req.u.port_num.port_number = port_num; + drm_dp_encode_sideband_req(&req, msg); + msg->path_msg = true; + return 0; +} + +static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_vcpi *vcpi) +{ + int ret, vcpi_ret; + + mutex_lock(&mgr->payload_lock); + ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); + if (ret > mgr->max_payloads) { + ret = -EINVAL; + DRM_DEBUG_KMS("out of payload ids %d\n", ret); + goto out_unlock; + } + + vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1); + if (vcpi_ret > mgr->max_payloads) { + ret = -EINVAL; + DRM_DEBUG_KMS("out of vcpi ids %d\n", ret); + goto out_unlock; + } + + set_bit(ret, &mgr->payload_mask); + set_bit(vcpi_ret, &mgr->vcpi_mask); + vcpi->vcpi = vcpi_ret + 1; + mgr->proposed_vcpis[ret - 1] = vcpi; +out_unlock: + mutex_unlock(&mgr->payload_lock); + return ret; +} + +static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, + int vcpi) +{ + int i; + if (vcpi == 0) + return; + + mutex_lock(&mgr->payload_lock); + DRM_DEBUG_KMS("putting payload %d\n", vcpi); + clear_bit(vcpi - 1, &mgr->vcpi_mask); + + for (i = 0; i < mgr->max_payloads; i++) { + if (mgr->proposed_vcpis[i]) + if (mgr->proposed_vcpis[i]->vcpi == vcpi) { + mgr->proposed_vcpis[i] = NULL; + clear_bit(i + 1, &mgr->payload_mask); + } + } + mutex_unlock(&mgr->payload_lock); +} + +static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + unsigned int state; + + /* + * All updates to txmsg->state are protected by mgr->qlock, and the two + * cases we check here are terminal states. For those the barriers + * provided by the wake_up/wait_event pair are enough. 
+ */ + state = READ_ONCE(txmsg->state); + return (state == DRM_DP_SIDEBAND_TX_RX || + state == DRM_DP_SIDEBAND_TX_TIMEOUT); +} + +static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, + struct drm_dp_sideband_msg_tx *txmsg) +{ + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + int ret; + + ret = wait_event_timeout(mgr->tx_waitq, + check_txmsg_state(mgr, txmsg), + (4 * HZ)); + mutex_lock(&mstb->mgr->qlock); + if (ret > 0) { + if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { + ret = -EIO; + goto out; + } + } else { + DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); + + /* dump some state */ + ret = -EIO; + + /* remove from q */ + if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || + txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { + list_del(&txmsg->next); + } + + if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || + txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { + mstb->tx_slots[txmsg->seqno] = NULL; + } + } +out: + mutex_unlock(&mgr->qlock); + + return ret; +} + +static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) +{ + struct drm_dp_mst_branch *mstb; + + mstb = kzalloc(sizeof(*mstb), GFP_KERNEL); + if (!mstb) + return NULL; + + mstb->lct = lct; + if (lct > 1) + memcpy(mstb->rad, rad, lct / 2); + INIT_LIST_HEAD(&mstb->ports); + kref_init(&mstb->topology_kref); + kref_init(&mstb->malloc_kref); + return mstb; +} + +static void drm_dp_free_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = + container_of(kref, struct drm_dp_mst_branch, malloc_kref); + + if (mstb->port_parent) + drm_dp_mst_put_port_malloc(mstb->port_parent); + + kfree(mstb); +} + +/** + * DOC: Branch device and port refcounting + * + * Topology refcount overview + * ~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The refcounting schemes for &struct drm_dp_mst_branch and &struct + * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have + * two different kinds of refcounts: topology refcounts, and malloc refcounts. + * + * Topology refcounts are not exposed to drivers, and are handled internally + * by the DP MST helpers. The helpers use them in order to prevent the + * in-memory topology state from being changed in the middle of critical + * operations like changing the internal state of payload allocations. This + * means each branch and port will be considered to be connected to the rest + * of the topology until its topology refcount reaches zero. Additionally, + * for ports this means that their associated &struct drm_connector will stay + * registered with userspace until the port's refcount reaches 0. + * + * Malloc refcount overview + * ~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Malloc references are used to keep a &struct drm_dp_mst_port or &struct + * drm_dp_mst_branch allocated even after all of its topology references have + * been dropped, so that the driver or MST helpers can safely access each + * branch's last known state before it was disconnected from the topology. + * When the malloc refcount of a port or branch reaches 0, the memory + * allocation containing the &struct drm_dp_mst_branch or &struct + * drm_dp_mst_port respectively will be freed. + * + * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed + * to drivers. As of writing this documentation, there are no drivers that + * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST + * helpers. 
Exposing this API to drivers in a race-free manner would take more + * tweaking of the refcounting scheme; however, patches are welcome provided + * there is a legitimate driver use case for this. + * + * Refcount relationships in a topology + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Let's take a look at why the relationship between topology and malloc + * refcounts is designed the way it is. + * + * .. kernel-figure:: dp-mst/topology-figure-1.dot + * + * An example of topology and malloc refs in a DP MST topology with two + * active payloads. Topology refcount increments are indicated by solid + * lines, and malloc refcount increments are indicated by dashed lines. + * Each starts from the branch which incremented the refcount, and ends at + * the branch to which the refcount belongs, i.e. the arrow points the + * same way as the C pointers used to reference a structure. + * + * As you can see in the above figure, every branch increments the topology + * refcount of its children, and increments the malloc refcount of its + * parent. Additionally, every payload increments the malloc refcount of its + * assigned port by 1. + * + * So, what would happen if MSTB #3 from the above figure was unplugged from + * the system, but the driver hadn't yet removed payload #2 from port #3? The + * topology would start to look like the figure below. + * + * .. kernel-figure:: dp-mst/topology-figure-2.dot + * + * Ports and branch devices which have been released from memory are + * colored grey, and references which have been removed are colored red. + * + * Whenever a port or branch device's topology refcount reaches zero, it will + * decrement the topology refcounts of all its children, the malloc refcount + * of its parent, and finally its own malloc refcount. For MSTB #4 and port + * #4, this means they both have been disconnected from the topology and freed + * from memory. But, because payload #2 is still holding a reference to port + * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port + * is still accessible from memory. This also means port #3 has not yet + * decremented the malloc refcount of MSTB #3, so its &struct + * drm_dp_mst_branch will also stay allocated in memory until port #3's + * malloc refcount reaches 0. + * + * This relationship is necessary because in order to release payload #2, we + * need to be able to figure out the last relative of port #3 that's still + * connected to the topology. In this case, we would travel up the topology as + * shown below. + * + * .. kernel-figure:: dp-mst/topology-figure-3.dot + * + * And finally, remove payload #2 by communicating with port #2 through + * sideband transactions. + */ + +/** + * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch + * device + * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of + * + * Increments &drm_dp_mst_branch.malloc_kref. When + * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb + * will be released and @mstb may no longer be used. + * + * See also: drm_dp_mst_put_mstb_malloc() + */ +static void +drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb) +{ + kref_get(&mstb->malloc_kref); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)); +} + +/** + * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch + * device + * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of + * + * Decrements &drm_dp_mst_branch.malloc_kref.
When + * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb + * will be released and @mstb may no longer be used. + * + * See also: drm_dp_mst_get_mstb_malloc() + */ +static void +drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb) +{ + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1); + kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device); +} + +static void drm_dp_free_mst_port(struct kref *kref) +{ + struct drm_dp_mst_port *port = + container_of(kref, struct drm_dp_mst_port, malloc_kref); + + drm_dp_mst_put_mstb_malloc(port->parent); + kfree(port); +} + +/** + * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port + * @port: The &struct drm_dp_mst_port to increment the malloc refcount of + * + * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref + * reaches 0, the memory allocation for @port will be released and @port may + * no longer be used. + * + * Because @port could potentially be freed at any time by the DP MST helpers + * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this + * function, drivers that wish to make use of &struct drm_dp_mst_port should + * ensure that they grab at least one main malloc reference to their MST ports + * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before + * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0. + * + * See also: drm_dp_mst_put_port_malloc() + */ +void +drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port) +{ + kref_get(&port->malloc_kref); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref)); +} +EXPORT_SYMBOL(drm_dp_mst_get_port_malloc); + +/** + * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port + * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of + * + * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref + * reaches 0, the memory allocation for @port will be released and @port may + * no longer be used.
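+ *
+ * As a rough sketch of the rule described for drm_dp_mst_get_port_malloc()
+ * above (my_mst_add_connector() and my_create_connector() are hypothetical
+ * driver code; only the drm_dp_mst_get_port_malloc() call is the point)::
+ *
+ *     static struct drm_connector *
+ *     my_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ *                          struct drm_dp_mst_port *port, const char *path)
+ *     {
+ *             drm_dp_mst_get_port_malloc(port);
+ *             return my_create_connector(mgr, port, path);
+ *     }
+ *
+ * The matching drm_dp_mst_put_port_malloc() then belongs in the driver's
+ * connector destroy path.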
+ * + * See also: drm_dp_mst_get_port_malloc() + */ +void +drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) +{ + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1); + kref_put(&port->malloc_kref, drm_dp_free_mst_port); +} +EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); + +static void drm_dp_destroy_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = + container_of(kref, struct drm_dp_mst_branch, topology_kref); + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + struct drm_dp_mst_port *port, *tmp; + bool wake_tx = false; + + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + } + mutex_unlock(&mgr->lock); + + /* drop any tx slots msg */ + mutex_lock(&mstb->mgr->qlock); + if (mstb->tx_slots[0]) { + mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[0] = NULL; + wake_tx = true; + } + if (mstb->tx_slots[1]) { + mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[1] = NULL; + wake_tx = true; + } + mutex_unlock(&mstb->mgr->qlock); + + if (wake_tx) + wake_up_all(&mstb->mgr->tx_waitq); + + drm_dp_mst_put_mstb_malloc(mstb); +} + +/** + * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a + * branch device unless it's zero + * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of + * + * Attempts to grab a topology reference to @mstb, if it hasn't yet been + * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has + * reached 0). Holding a topology reference implies that a malloc reference + * will be held to @mstb as long as the user holds the topology reference. + * + * Care should be taken to ensure that the user has at least one malloc + * reference to @mstb. If you already have a topology reference to @mstb, you + * should use drm_dp_mst_topology_get_mstb() instead. + * + * See also: + * drm_dp_mst_topology_get_mstb() + * drm_dp_mst_topology_put_mstb() + * + * Returns: + * * 1: A topology reference was grabbed successfully + * * 0: @mstb is no longer in the topology, no reference was grabbed + */ +static int __must_check +drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) +{ + int ret = kref_get_unless_zero(&mstb->topology_kref); + + if (ret) + DRM_DEBUG("mstb %p (%d)\n", mstb, + kref_read(&mstb->topology_kref)); + + return ret; +} + +/** + * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a + * branch device + * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of + * + * Increments &drm_dp_mst_branch.topology_kref without checking whether or + * not it's already reached 0. This is only valid to use in scenarios where + * you are already guaranteed to have at least one active topology reference + * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used. + * + * See also: + * drm_dp_mst_topology_try_get_mstb() + * drm_dp_mst_topology_put_mstb() + */ +static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) +{ + WARN_ON(kref_read(&mstb->topology_kref) == 0); + kref_get(&mstb->topology_kref); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); +} + +/** + * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch + * device + * @mstb: The &struct drm_dp_mst_branch to release the topology reference from + * + * Releases a topology reference from @mstb by decrementing + * &drm_dp_mst_branch.topology_kref.
+ * + * See also: + * drm_dp_mst_topology_try_get_mstb() + * drm_dp_mst_topology_get_mstb() + */ +static void +drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) +{ + DRM_DEBUG("mstb %p (%d)\n", + mstb, kref_read(&mstb->topology_kref) - 1); + kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); +} + +static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) +{ + struct drm_dp_mst_branch *mstb; + + switch (old_pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + break; + case DP_PEER_DEVICE_MST_BRANCHING: + mstb = port->mstb; + port->mstb = NULL; + drm_dp_mst_topology_put_mstb(mstb); + break; + } +} + +static void drm_dp_destroy_port(struct kref *kref) +{ + struct drm_dp_mst_port *port = + container_of(kref, struct drm_dp_mst_port, topology_kref); + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + + if (!port->input) { + kfree(port->cached_edid); + + /* + * The only time we don't have a connector + * on an output port is if the connector init + * fails. + */ + if (port->connector) { + /* we can't destroy the connector here, as + * we might be holding the mode_config.mutex + * from an EDID retrieval */ + + mutex_lock(&mgr->destroy_connector_lock); + list_add(&port->next, &mgr->destroy_connector_list); + mutex_unlock(&mgr->destroy_connector_lock); + schedule_work(&mgr->destroy_connector_work); + return; + } + /* no need to clean up the vcpi + * as if we have no connector we never set up a vcpi */ + drm_dp_port_teardown_pdt(port, port->pdt); + port->pdt = DP_PEER_DEVICE_NONE; + } + drm_dp_mst_put_port_malloc(port); +} + +/** + * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a + * port unless it's zero + * @port: &struct drm_dp_mst_port to increment the topology refcount of + * + * Attempts to grab a topology reference to @port, if it hasn't yet been + * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached + * 0). Holding a topology reference implies that a malloc reference will be + * held to @port as long as the user holds the topology reference. + * + * Care should be taken to ensure that the user has at least one malloc + * reference to @port. If you already have a topology reference to @port, you + * should use drm_dp_mst_topology_get_port() instead. + * + * See also: + * drm_dp_mst_topology_get_port() + * drm_dp_mst_topology_put_port() + * + * Returns: + * * 1: A topology reference was grabbed successfully + * * 0: @port is no longer in the topology, no reference was grabbed + */ +static int __must_check +drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) +{ + int ret = kref_get_unless_zero(&port->topology_kref); + + if (ret) + DRM_DEBUG("port %p (%d)\n", port, + kref_read(&port->topology_kref)); + + return ret; +} + +/** + * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port + * @port: The &struct drm_dp_mst_port to increment the topology refcount of + * + * Increments &drm_dp_mst_port.topology_kref without checking whether or + * not it's already reached 0. This is only valid to use in scenarios where + * you are already guaranteed to have at least one active topology reference + * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
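+ *
+ * For reference, the usual lookup-and-use shape built on these helpers
+ * (the same pattern drm_dp_send_power_updown_phy() below follows;
+ * do_something_with() is a placeholder for the actual sideband work)::
+ *
+ *     port = drm_dp_mst_topology_get_port_validated(mgr, port);
+ *     if (!port)
+ *             return -EINVAL;
+ *     ret = do_something_with(port);
+ *     drm_dp_mst_topology_put_port(port);
+ *     return ret;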
+ * + * See also: + * drm_dp_mst_topology_try_get_port() + * drm_dp_mst_topology_put_port() + */ +static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) +{ + WARN_ON(kref_read(&port->topology_kref) == 0); + kref_get(&port->topology_kref); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); +} + +/** + * drm_dp_mst_topology_put_port() - release a topology reference to a port + * @port: The &struct drm_dp_mst_port to release the topology reference from + * + * Releases a topology reference from @port by decrementing + * &drm_dp_mst_port.topology_kref. + * + * See also: + * drm_dp_mst_topology_try_get_port() + * drm_dp_mst_topology_get_port() + */ +static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) +{ + DRM_DEBUG("port %p (%d)\n", + port, kref_read(&port->topology_kref) - 1); + kref_put(&port->topology_kref, drm_dp_destroy_port); +} + +static struct drm_dp_mst_branch * +drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_branch *to_find) +{ + struct drm_dp_mst_port *port; + struct drm_dp_mst_branch *rmstb; + + if (to_find == mstb) + return mstb; + + list_for_each_entry(port, &mstb->ports, next) { + if (port->mstb) { + rmstb = drm_dp_mst_topology_get_mstb_validated_locked( + port->mstb, to_find); + if (rmstb) + return rmstb; + } + } + return NULL; +} + +static struct drm_dp_mst_branch * +drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_branch *rmstb = NULL; + + mutex_lock(&mgr->lock); + if (mgr->mst_primary) { + rmstb = drm_dp_mst_topology_get_mstb_validated_locked( + mgr->mst_primary, mstb); + + if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb)) + rmstb = NULL; + } + mutex_unlock(&mgr->lock); + return rmstb; +} + +static struct drm_dp_mst_port * +drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *to_find) +{ + struct drm_dp_mst_port *port, *mport; + + list_for_each_entry(port, &mstb->ports, next) { + if (port == to_find) + return port; + + if (port->mstb) { + mport = drm_dp_mst_topology_get_port_validated_locked( + port->mstb, to_find); + if (mport) + return mport; + } + } + return NULL; +} + +static struct drm_dp_mst_port * +drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_port *rport = NULL; + + mutex_lock(&mgr->lock); + if (mgr->mst_primary) { + rport = drm_dp_mst_topology_get_port_validated_locked( + mgr->mst_primary, port); + + if (rport && !drm_dp_mst_topology_try_get_port(rport)) + rport = NULL; + } + mutex_unlock(&mgr->lock); + return rport; +} + +static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) +{ + struct drm_dp_mst_port *port; + int ret; + + list_for_each_entry(port, &mstb->ports, next) { + if (port->port_num == port_num) { + ret = drm_dp_mst_topology_try_get_port(port); + return ret ? port : NULL; + } + } + + return NULL; +} + +/* + * calculate a new RAD for this MST branch device + * if parent has an LCT of 2 then it has 1 nibble of RAD, + * if parent has an LCT of 3 then it has 2 nibbles of RAD, + */ +static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, + u8 *rad) +{ + int parent_lct = port->parent->lct; + int shift = 4; + int idx = (parent_lct - 1) / 2; + if (parent_lct > 1) { + memcpy(rad, port->parent->rad, idx + 1); + shift = (parent_lct % 2) ? 
4 : 0; + } else + rad[0] = 0; + + rad[idx] |= port->port_num << shift; + return parent_lct + 1; +} + +/* + * return sends link address for new mstb + */ +static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) +{ + int ret; + u8 rad[6], lct; + bool send_link = false; + switch (port->pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* add i2c over sideband */ + ret = drm_dp_mst_register_i2c_bus(&port->aux); + break; + case DP_PEER_DEVICE_MST_BRANCHING: + lct = drm_dp_calculate_rad(port, rad); + + port->mstb = drm_dp_add_mst_branch_device(lct, rad); + if (port->mstb) { + port->mstb->mgr = port->mgr; + port->mstb->port_parent = port; + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + + send_link = true; + } + break; + } + return send_link; +} + +/** + * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband + * @aux: Fake sideband AUX CH + * @offset: address of the (first) register to read + * @buffer: buffer to store the register values + * @size: number of bytes in @buffer + * + * Performs the same functionality for remote devices via + * sideband messaging as drm_dp_dpcd_read() does for local + * devices via actual AUX CH. + * + * Return: Number of bytes read, or negative error code on failure. + */ +ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size) +{ + struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, + aux); + + return drm_dp_send_dpcd_read(port->mgr, port, + offset, size, buffer); +} + +/** + * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband + * @aux: Fake sideband AUX CH + * @offset: address of the (first) register to write + * @buffer: buffer containing the values to write + * @size: number of bytes in @buffer + * + * Performs the same functionality for remote devices via + * sideband messaging as drm_dp_dpcd_write() does for local + * devices via actual AUX CH. + * + * Return: 0 on success, negative error code on failure. + */ +ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size) +{ + struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, + aux); + + return drm_dp_send_dpcd_write(port->mgr, port, + offset, size, buffer); +} + +static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) +{ + int ret; + + memcpy(mstb->guid, guid, 16); + + if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { + if (mstb->port_parent) { + ret = drm_dp_send_dpcd_write( + mstb->mgr, + mstb->port_parent, + DP_GUID, + 16, + mstb->guid); + } else { + + ret = drm_dp_dpcd_write( + mstb->mgr->aux, + DP_GUID, + mstb->guid, + 16); + } + } +} + +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, + int pnum, + char *proppath, + size_t proppath_size) +{ + int i; + char temp[8]; + snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); + for (i = 0; i < (mstb->lct - 1); i++) { + int shift = (i % 2) ? 
0 : 4; + int port_num = (mstb->rad[i / 2] >> shift) & 0xf; + snprintf(temp, sizeof(temp), "-%d", port_num); + strlcat(proppath, temp, proppath_size); + } + snprintf(temp, sizeof(temp), "-%d", pnum); + strlcat(proppath, temp, proppath_size); +} + +/** + * drm_dp_mst_connector_late_register() - Late MST connector registration + * @connector: The MST connector + * @port: The MST port for this connector + * + * Helper to register the remote aux device for this MST port. Drivers should + * call this from their mst connector's late_register hook to enable MST aux + * devices. + * + * Return: 0 on success, negative error code on failure. + */ +int drm_dp_mst_connector_late_register(struct drm_connector *connector, + struct drm_dp_mst_port *port) +{ + DRM_DEBUG_KMS("registering %s remote bus for %s\n", + port->aux.name, connector->kdev->kobj.name); + + port->aux.dev = connector->kdev; + return drm_dp_aux_register_devnode(&port->aux); +} +EXPORT_SYMBOL(drm_dp_mst_connector_late_register); + +/** + * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration + * @connector: The MST connector + * @port: The MST port for this connector + * + * Helper to unregister the remote aux device for this MST port, registered by + * drm_dp_mst_connector_late_register(). Drivers should call this from their mst + * connector's early_unregister hook. + */ +void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, + struct drm_dp_mst_port *port) +{ + DRM_DEBUG_KMS("unregistering %s remote bus for %s\n", + port->aux.name, connector->kdev->kobj.name); + drm_dp_aux_unregister_devnode(&port->aux); +} +EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); + +static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, + struct drm_device *dev, + struct drm_dp_link_addr_reply_port *port_msg) +{ + struct drm_dp_mst_port *port; + bool ret; + bool created = false; + int old_pdt = 0; + int old_ddps = 0; + + port = drm_dp_get_port(mstb, port_msg->port_number); + if (!port) { + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return; + kref_init(&port->topology_kref); + kref_init(&port->malloc_kref); + port->parent = mstb; + port->port_num = port_msg->port_number; + port->mgr = mstb->mgr; + port->aux.name = "DPMST"; + port->aux.dev = dev->dev; + port->aux.is_remote = true; + + /* + * Make sure the memory allocation for our parent branch stays + * around until our own memory allocation is released + */ + drm_dp_mst_get_mstb_malloc(mstb); + + created = true; + } else { + old_pdt = port->pdt; + old_ddps = port->ddps; + } + + port->pdt = port_msg->peer_device_type; + port->input = port_msg->input_port; + port->mcs = port_msg->mcs; + port->ddps = port_msg->ddps; + port->ldps = port_msg->legacy_device_plug_status; + port->dpcd_rev = port_msg->dpcd_revision; + port->num_sdp_streams = port_msg->num_sdp_streams; + port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; + + /* manage mstb port lists with mgr lock - take a reference + for this list */ + if (created) { + mutex_lock(&mstb->mgr->lock); + drm_dp_mst_topology_get_port(port); + list_add(&port->next, &mstb->ports); + mutex_unlock(&mstb->mgr->lock); + } + + if (old_ddps != port->ddps) { + if (port->ddps) { + if (!port->input) { + drm_dp_send_enum_path_resources(mstb->mgr, + mstb, port); + } + } else { + port->available_pbn = 0; + } + } + + if (old_pdt != port->pdt && !port->input) { + drm_dp_port_teardown_pdt(port, old_pdt); + + ret = drm_dp_port_setup_pdt(port); + if (ret == true) + drm_dp_send_link_address(mstb->mgr, port->mstb); 
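+ /* the LINK_ADDRESS probe just queued will enumerate the new branch's ports */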
+ } + + if (created && !port->input) { + char proppath[255]; + + build_mst_prop_path(mstb, port->port_num, proppath, + sizeof(proppath)); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, + port, + proppath); + if (!port->connector) { + /* remove it from the port list */ + mutex_lock(&mstb->mgr->lock); + list_del(&port->next); + mutex_unlock(&mstb->mgr->lock); + /* drop port list reference */ + drm_dp_mst_topology_put_port(port); + goto out; + } + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + port->pdt == DP_PEER_DEVICE_SST_SINK) && + port->port_num >= DP_MST_LOGICAL_PORT_0) { + port->cached_edid = drm_get_edid(port->connector, + &port->aux.ddc); + drm_connector_set_tile_property(port->connector); + } + (*mstb->mgr->cbs->register_connector)(port->connector); + } + +out: + /* put reference to this port */ + drm_dp_mst_topology_put_port(port); +} + +static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, + struct drm_dp_connection_status_notify *conn_stat) +{ + struct drm_dp_mst_port *port; + int old_pdt; + int old_ddps; + bool dowork = false; + port = drm_dp_get_port(mstb, conn_stat->port_number); + if (!port) + return; + + old_ddps = port->ddps; + old_pdt = port->pdt; + port->pdt = conn_stat->peer_device_type; + port->mcs = conn_stat->message_capability_status; + port->ldps = conn_stat->legacy_device_plug_status; + port->ddps = conn_stat->displayport_device_plug_status; + + if (old_ddps != port->ddps) { + if (port->ddps) { + dowork = true; + } else { + port->available_pbn = 0; + } + } + if (old_pdt != port->pdt && !port->input) { + drm_dp_port_teardown_pdt(port, old_pdt); + + if (drm_dp_port_setup_pdt(port)) + dowork = true; + } + + drm_dp_mst_topology_put_port(port); + if (dowork) + queue_work(system_long_wq, &mstb->mgr->work); + +} + +static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, + u8 lct, u8 *rad) +{ + struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_port *port; + int i, ret; + /* find the port by iterating down */ + + mutex_lock(&mgr->lock); + mstb = mgr->mst_primary; + + if (!mstb) + goto out; + + for (i = 0; i < lct - 1; i++) { + int shift = (i % 2) ? 
0 : 4; + int port_num = (rad[i / 2] >> shift) & 0xf; + + list_for_each_entry(port, &mstb->ports, next) { + if (port->port_num == port_num) { + mstb = port->mstb; + if (!mstb) { + DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); + goto out; + } + + break; + } + } + } + ret = drm_dp_mst_topology_try_get_mstb(mstb); + if (!ret) + mstb = NULL; +out: + mutex_unlock(&mgr->lock); + return mstb; +} + +static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( + struct drm_dp_mst_branch *mstb, + uint8_t *guid) +{ + struct drm_dp_mst_branch *found_mstb; + struct drm_dp_mst_port *port; + + if (memcmp(mstb->guid, guid, 16) == 0) + return mstb; + + + list_for_each_entry(port, &mstb->ports, next) { + if (!port->mstb) + continue; + + found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); + + if (found_mstb) + return found_mstb; + } + + return NULL; +} + +static struct drm_dp_mst_branch * +drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, + uint8_t *guid) +{ + struct drm_dp_mst_branch *mstb; + int ret; + + /* find the port by iterating down */ + mutex_lock(&mgr->lock); + + mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); + if (mstb) { + ret = drm_dp_mst_topology_try_get_mstb(mstb); + if (!ret) + mstb = NULL; + } + + mutex_unlock(&mgr->lock); + return mstb; +} + +static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + struct drm_dp_mst_branch *mstb_child; + if (!mstb->link_address_sent) + drm_dp_send_link_address(mgr, mstb); + + list_for_each_entry(port, &mstb->ports, next) { + if (port->input) + continue; + + if (!port->ddps) + continue; + + if (!port->available_pbn) + drm_dp_send_enum_path_resources(mgr, mstb, port); + + if (port->mstb) { + mstb_child = drm_dp_mst_topology_get_mstb_validated( + mgr, port->mstb); + if (mstb_child) { + drm_dp_check_and_send_link_address(mgr, mstb_child); + drm_dp_mst_topology_put_mstb(mstb_child); + } + } + } +} + +static void drm_dp_mst_link_probe_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_dp_mst_branch *mstb; + int ret; + + mutex_lock(&mgr->lock); + mstb = mgr->mst_primary; + if (mstb) { + ret = drm_dp_mst_topology_try_get_mstb(mstb); + if (!ret) + mstb = NULL; + } + mutex_unlock(&mgr->lock); + if (mstb) { + drm_dp_check_and_send_link_address(mgr, mstb); + drm_dp_mst_topology_put_mstb(mstb); + } +} + +static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, + u8 *guid) +{ + u64 salt; + + if (memchr_inv(guid, 0, 16)) + return true; + + salt = get_jiffies_64(); + + memcpy(&guid[0], &salt, sizeof(u64)); + memcpy(&guid[8], &salt, sizeof(u64)); + + return false; +} + +static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_REMOTE_DPCD_READ; + req.u.dpcd_read.port_number = port_num; + req.u.dpcd_read.dpcd_address = offset; + req.u.dpcd_read.num_bytes = num_bytes; + drm_dp_encode_sideband_req(&req, msg); + + return 0; +} + +static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, + bool up, u8 *msg, int len) +{ + int ret; + int regbase = up ? 
DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; + int tosend, total, offset; + int retries = 0; + +retry: + total = len; + offset = 0; + do { + tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); + + ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, + &msg[offset], + tosend); + if (ret != tosend) { + if (ret == -EIO && retries < 5) { + retries++; + goto retry; + } + DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); + + return -EIO; + } + offset += tosend; + total -= tosend; + } while (total > 0); + return 0; +} + +static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + struct drm_dp_mst_branch *mstb = txmsg->dst; + u8 req_type; + + /* both msg slots are full */ + if (txmsg->seqno == -1) { + if (mstb->tx_slots[0] && mstb->tx_slots[1]) { + DRM_DEBUG_KMS("%s: failed to find slot\n", __func__); + return -EAGAIN; + } + if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) { + txmsg->seqno = mstb->last_seqno; + mstb->last_seqno ^= 1; + } else if (mstb->tx_slots[0] == NULL) + txmsg->seqno = 0; + else + txmsg->seqno = 1; + mstb->tx_slots[txmsg->seqno] = txmsg; + } + + req_type = txmsg->msg[0] & 0x7f; + if (req_type == DP_CONNECTION_STATUS_NOTIFY || + req_type == DP_RESOURCE_STATUS_NOTIFY) + hdr->broadcast = 1; + else + hdr->broadcast = 0; + hdr->path_msg = txmsg->path_msg; + hdr->lct = mstb->lct; + hdr->lcr = mstb->lct - 1; + if (mstb->lct > 1) + memcpy(hdr->rad, mstb->rad, mstb->lct / 2); + hdr->seqno = txmsg->seqno; + return 0; +} +/* + * process a single block of the next message in the sideband queue + */ +static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg, + bool up) +{ + u8 chunk[48]; + struct drm_dp_sideband_msg_hdr hdr; + int len, space, idx, tosend; + int ret; + + memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); + + if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { + txmsg->seqno = -1; + txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; + } + + /* make hdr from dst mst - for replies use seqno + otherwise assign one */ + ret = set_hdr_from_dst_qlock(&hdr, txmsg); + if (ret < 0) + return ret; + + /* amount left to send in this message */ + len = txmsg->cur_len - txmsg->cur_offset; + + /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ + space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); + + tosend = min(len, space); + if (len == txmsg->cur_len) + hdr.somt = 1; + if (space >= len) + hdr.eomt = 1; + + + hdr.msg_len = tosend + 1; + drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); + memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); + /* add crc at end */ + drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); + idx += tosend + 1; + + ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); + if (ret) { + DRM_DEBUG_KMS("sideband msg failed to send\n"); + return ret; + } + + txmsg->cur_offset += tosend; + if (txmsg->cur_offset == txmsg->cur_len) { + txmsg->state = DRM_DP_SIDEBAND_TX_SENT; + return 1; + } + return 0; +} + +static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + WARN_ON(!mutex_is_locked(&mgr->qlock)); + + /* construct a chunk from the first msg in the tx_msg queue */ + if (list_empty(&mgr->tx_msg_downq)) + return; + + txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); + ret = process_single_tx_qlock(mgr, txmsg, false); + if (ret == 1) { + /* txmsg is sent it should be in the slots now */ + 
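+ /* it stays in the destination's tx_slots until the reply path or a timeout clears it */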
list_del(&txmsg->next); + } else if (ret) { + DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + list_del(&txmsg->next); + if (txmsg->seqno != -1) + txmsg->dst->tx_slots[txmsg->seqno] = NULL; + txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + wake_up_all(&mgr->tx_waitq); + } +} + +/* called holding qlock */ +static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + int ret; + + /* construct a chunk from the first msg in the tx_msg queue */ + ret = process_single_tx_qlock(mgr, txmsg, true); + + if (ret != 1) + DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + + if (txmsg->seqno != -1) { + WARN_ON((unsigned int)txmsg->seqno > + ARRAY_SIZE(txmsg->dst->tx_slots)); + txmsg->dst->tx_slots[txmsg->seqno] = NULL; + } +} + +static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + mutex_lock(&mgr->qlock); + list_add_tail(&txmsg->next, &mgr->tx_msg_downq); + if (list_is_singular(&mgr->tx_msg_downq)) + process_single_down_tx_qlock(mgr); + mutex_unlock(&mgr->qlock); +} + +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) +{ + int len; + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return; + + txmsg->dst = mstb; + len = build_link_address(txmsg); + + mstb->link_address_sent = true; + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + int i; + + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { + DRM_DEBUG_KMS("link address nak received\n"); + } else { + DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { + DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i, + txmsg->reply.u.link_addr.ports[i].input_port, + txmsg->reply.u.link_addr.ports[i].peer_device_type, + txmsg->reply.u.link_addr.ports[i].port_number, + txmsg->reply.u.link_addr.ports[i].dpcd_revision, + txmsg->reply.u.link_addr.ports[i].mcs, + txmsg->reply.u.link_addr.ports[i].ddps, + txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, + txmsg->reply.u.link_addr.ports[i].num_sdp_streams, + txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); + } + + drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); + + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { + drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); + } + drm_kms_helper_hotplug_event(mgr->dev); + } + } else { + mstb->link_address_sent = false; + DRM_DEBUG_KMS("link address failed %d\n", ret); + } + + kfree(txmsg); +} + +static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + int len; + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + txmsg->dst = mstb; + len = build_enum_path_resources(txmsg, port->port_num); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { + DRM_DEBUG_KMS("enum path resources nak received\n"); + } else { + if (port->port_num != txmsg->reply.u.path_resources.port_number) + DRM_ERROR("got incorrect port in response\n"); + DRM_DEBUG_KMS("enum path resources %d: %d %d\n", 
txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, + txmsg->reply.u.path_resources.avail_payload_bw_number); + port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; + } + } + + kfree(txmsg); + return 0; +} + +static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) +{ + if (!mstb->port_parent) + return NULL; + + if (mstb->port_parent->mstb != mstb) + return mstb->port_parent; + + return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); +} + +/* + * Searches upwards in the topology starting from mstb to try to find the + * closest available parent of mstb that's still connected to the rest of the + * topology. This can be used in order to perform operations like releasing + * payloads, where the branch device which owned the payload may no longer be + * around and thus would require that the payload on the last living relative + * be freed instead. + */ +static struct drm_dp_mst_branch * +drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + int *port_num) +{ + struct drm_dp_mst_branch *rmstb = NULL; + struct drm_dp_mst_port *found_port; + + mutex_lock(&mgr->lock); + if (!mgr->mst_primary) + goto out; + + do { + found_port = drm_dp_get_last_connected_port_to_mstb(mstb); + if (!found_port) + break; + + if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { + rmstb = found_port->parent; + *port_num = found_port->port_num; + } else { + /* Search again, starting from this parent */ + mstb = found_port->parent; + } + } while (!rmstb); +out: + mutex_unlock(&mgr->lock); + return rmstb; +} + +static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + int pbn) +{ + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + int len, ret, port_num; + u8 sinks[DRM_DP_MAX_SDP_STREAMS]; + int i; + + port_num = port->port_num; + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); + if (!mstb) { + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, + port->parent, + &port_num); + + if (!mstb) + return -EINVAL; + } + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto fail_put; + } + + for (i = 0; i < port->num_sdp_streams; i++) + sinks[i] = i; + + txmsg->dst = mstb; + len = build_allocate_payload(txmsg, port_num, + id, + pbn, port->num_sdp_streams, sinks); + + drm_dp_queue_down_tx(mgr, txmsg); + + /* + * FIXME: there is a small chance that between getting the last + * connected mstb and sending the payload message, the last connected + * mstb could also be removed from the topology. In the future, this + * needs to be fixed by restarting the + * drm_dp_get_last_connected_port_and_mstb() search in the event of a + * timeout if the topology is still connected to the system. 
+ */ + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + ret = -EINVAL; + else + ret = 0; + } + kfree(txmsg); +fail_put: + drm_dp_mst_topology_put_mstb(mstb); + return ret; +} + +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, bool power_up) +{ + struct drm_dp_sideband_msg_tx *txmsg; + int len, ret; + + port = drm_dp_mst_topology_get_port_validated(mgr, port); + if (!port) + return -EINVAL; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + drm_dp_mst_topology_put_port(port); + return -ENOMEM; + } + + txmsg->dst = port->parent; + len = build_power_updown_phy(txmsg, port->port_num, power_up); + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + ret = -EINVAL; + else + ret = 0; + } + kfree(txmsg); + drm_dp_mst_topology_put_port(port); + + return ret; +} +EXPORT_SYMBOL(drm_dp_send_power_updown_phy); + +static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload) +{ + int ret; + + ret = drm_dp_dpcd_write_payload(mgr, id, payload); + if (ret < 0) { + payload->payload_state = 0; + return ret; + } + payload->payload_state = DP_PAYLOAD_LOCAL; + return 0; +} + +static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + struct drm_dp_payload *payload) +{ + int ret; + ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); + if (ret < 0) + return ret; + payload->payload_state = DP_PAYLOAD_REMOTE; + return ret; +} + +static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + struct drm_dp_payload *payload) +{ + DRM_DEBUG_KMS("\n"); + /* it's okay for these to fail */ + if (port) { + drm_dp_payload_send_msg(mgr, port, id, 0); + } + + drm_dp_dpcd_write_payload(mgr, id, payload); + payload->payload_state = DP_PAYLOAD_DELETE_LOCAL; + return 0; +} + +static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload) +{ + payload->payload_state = 0; + return 0; +} + +/** + * drm_dp_update_payload_part1() - Execute payload update part 1 + * @mgr: manager to use. + * + * This iterates over all proposed virtual channels, and tries to + * allocate space in the link for them. For 0->slots transitions, + * this step just writes the VCPI to the MST device. For slots->0 + * transitions, this writes the updated VCPIs and removes the + * remote VC payloads. + * + * after calling this the driver should generate ACT and payload + * packets. 
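+ *
+ * As an illustrative sketch (not a fixed recipe), the payload programming
+ * sequence a driver performs after allocating its VCPIs typically looks
+ * like:
+ *
+ * .. code-block:: c
+ *
+ *	drm_dp_update_payload_part1(mgr);
+ *	// trigger the ACT sequence in hardware, then wait for the sink
+ *	drm_dp_check_act_status(mgr);
+ *	drm_dp_update_payload_part2(mgr);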
+ */ +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_payload req_payload; + struct drm_dp_mst_port *port; + int i, j; + int cur_slots = 1; + + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; + struct drm_dp_payload *payload = &mgr->payloads[i]; + bool put_port = false; + + /* solve the current payloads - compare to the hw ones + - update the hw view */ + req_payload.start_slot = cur_slots; + if (vcpi) { + port = container_of(vcpi, struct drm_dp_mst_port, + vcpi); + + /* Validated ports don't matter if we're releasing + * VCPI + */ + if (vcpi->num_slots) { + port = drm_dp_mst_topology_get_port_validated( + mgr, port); + if (!port) { + mutex_unlock(&mgr->payload_lock); + return -EINVAL; + } + put_port = true; + } + + req_payload.num_slots = vcpi->num_slots; + req_payload.vcpi = vcpi->vcpi; + } else { + port = NULL; + req_payload.num_slots = 0; + } + + payload->start_slot = req_payload.start_slot; + /* work out what is required to happen with this payload */ + if (payload->num_slots != req_payload.num_slots) { + + /* need to push an update for this payload */ + if (req_payload.num_slots) { + drm_dp_create_payload_step1(mgr, vcpi->vcpi, + &req_payload); + payload->num_slots = req_payload.num_slots; + payload->vcpi = req_payload.vcpi; + + } else if (payload->num_slots) { + payload->num_slots = 0; + drm_dp_destroy_payload_step1(mgr, port, + payload->vcpi, + payload); + req_payload.payload_state = + payload->payload_state; + payload->start_slot = 0; + } + payload->payload_state = req_payload.payload_state; + } + cur_slots += req_payload.num_slots; + + if (put_port) + drm_dp_mst_topology_put_port(port); + } + + for (i = 0; i < mgr->max_payloads; i++) { + if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) + continue; + + DRM_DEBUG_KMS("removing payload %d\n", i); + for (j = i; j < mgr->max_payloads - 1; j++) { + mgr->payloads[j] = mgr->payloads[j + 1]; + mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; + + if (mgr->proposed_vcpis[j] && + mgr->proposed_vcpis[j]->num_slots) { + set_bit(j + 1, &mgr->payload_mask); + } else { + clear_bit(j + 1, &mgr->payload_mask); + } + } + + memset(&mgr->payloads[mgr->max_payloads - 1], 0, + sizeof(struct drm_dp_payload)); + mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; + clear_bit(mgr->max_payloads, &mgr->payload_mask); + } + mutex_unlock(&mgr->payload_lock); + + return 0; +} +EXPORT_SYMBOL(drm_dp_update_payload_part1); + +/** + * drm_dp_update_payload_part2() - Execute payload update part 2 + * @mgr: manager to use. + * + * This iterates over all proposed virtual channels, and tries to + * allocate space in the link for them. For 0->slots transitions, + * this step writes the remote VC payload commands. For slots->0 + * this just resets some internal state. 
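+ *
+ * See the sketch in drm_dp_update_payload_part1()'s documentation above for
+ * where this call fits in the overall sequence.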
+ */ +int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_mst_port *port; + int i; + int ret = 0; + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + + if (!mgr->proposed_vcpis[i]) + continue; + + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + + DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); + if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { + ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); + } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { + ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); + } + if (ret) { + mutex_unlock(&mgr->payload_lock); + return ret; + } + } + mutex_unlock(&mgr->payload_lock); + return 0; +} +EXPORT_SYMBOL(drm_dp_update_payload_part2); + +static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes) +{ + int len; + int ret = 0; + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); + if (!mstb) + return -EINVAL; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto fail_put; + } + + len = build_dpcd_read(txmsg, port->port_num, offset, size); + txmsg->dst = port->parent; + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret < 0) + goto fail_free; + + /* DPCD read should never be NACKed */ + if (txmsg->reply.reply_type == 1) { + DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", + mstb, port->port_num, offset, size); + ret = -EIO; + goto fail_free; + } + + if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) { + ret = -EPROTO; + goto fail_free; + } + + ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes, + size); + memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret); + +fail_free: + kfree(txmsg); +fail_put: + drm_dp_mst_topology_put_mstb(mstb); + + return ret; +} + +static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes) +{ + int len; + int ret; + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); + if (!mstb) + return -EINVAL; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto fail_put; + } + + len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); + txmsg->dst = mstb; + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + ret = -EIO; + else + ret = 0; + } + kfree(txmsg); +fail_put: + drm_dp_mst_topology_put_mstb(mstb); + return ret; +} + +static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) +{ + struct drm_dp_sideband_msg_reply_body reply; + + reply.reply_type = DP_SIDEBAND_REPLY_ACK; + reply.req_type = req_type; + drm_dp_encode_sideband_reply(&reply, msg); + return 0; +} + +static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + int req_type, int seqno, bool broadcast) +{ + struct drm_dp_sideband_msg_tx *txmsg; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + txmsg->dst = mstb; + txmsg->seqno = 
seqno; + drm_dp_encode_up_ack_reply(txmsg, req_type); + + mutex_lock(&mgr->qlock); + + process_single_up_tx_qlock(mgr, txmsg); + + mutex_unlock(&mgr->qlock); + + kfree(txmsg); + return 0; +} + +static bool drm_dp_get_vc_payload_bw(int dp_link_bw, + int dp_link_count, + int *out) +{ + switch (dp_link_bw) { + default: + DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", + dp_link_bw, dp_link_count); + return false; + + case DP_LINK_BW_1_62: + *out = 3 * dp_link_count; + break; + case DP_LINK_BW_2_7: + *out = 5 * dp_link_count; + break; + case DP_LINK_BW_5_4: + *out = 10 * dp_link_count; + break; + case DP_LINK_BW_8_1: + *out = 15 * dp_link_count; + break; + } + return true; +} + +/** + * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager + * @mgr: manager to set state for + * @mst_state: true to enable MST on this connector - false to disable. + * + * This is called by the driver when it detects an MST capable device plugged + * into a DP MST capable port, or when a DP MST capable device is unplugged. + */ +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) +{ + int ret = 0; + struct drm_dp_mst_branch *mstb = NULL; + + mutex_lock(&mgr->lock); + if (mst_state == mgr->mst_state) + goto out_unlock; + + mgr->mst_state = mst_state; + /* set the device into MST mode */ + if (mst_state) { + WARN_ON(mgr->mst_primary); + + /* get dpcd info */ + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("failed to read DPCD\n"); + goto out_unlock; + } + + if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1], + mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, + &mgr->pbn_div)) { + ret = -EINVAL; + goto out_unlock; + } + + /* add initial branch device at LCT 1 */ + mstb = drm_dp_add_mst_branch_device(1, NULL); + if (mstb == NULL) { + ret = -ENOMEM; + goto out_unlock; + } + mstb->mgr = mgr; + + /* give this the main reference */ + mgr->mst_primary = mstb; + drm_dp_mst_topology_get_mstb(mgr->mst_primary); + + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + if (ret < 0) { + goto out_unlock; + } + + { + struct drm_dp_payload reset_pay; + reset_pay.start_slot = 0; + reset_pay.num_slots = 0x3f; + drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); + } + + queue_work(system_long_wq, &mgr->work); + + ret = 0; + } else { + /* disable MST on the device */ + mstb = mgr->mst_primary; + mgr->mst_primary = NULL; + /* this can fail if the device is gone */ + drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); + ret = 0; + memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); + mgr->payload_mask = 0; + set_bit(0, &mgr->payload_mask); + mgr->vcpi_mask = 0; + } + +out_unlock: + mutex_unlock(&mgr->lock); + if (mstb) + drm_dp_mst_topology_put_mstb(mstb); + return ret; + +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); + +/** + * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager + * @mgr: manager to suspend + * + * This function tells the MST device that we can't handle UP messages + * anymore. This should stop it from sending any since we are suspended. 
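+ *
+ * A minimal suspend/resume pairing might look like the following sketch
+ * (error handling abbreviated; the reprobe strategy is driver policy):
+ *
+ * .. code-block:: c
+ *
+ *	drm_dp_mst_topology_mgr_suspend(mgr);
+ *	// ... system suspends and later resumes ...
+ *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
+ *		// device was undocked or replaced: tear down and reprobe
+ *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ *	}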
+ */
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
+{
+	mutex_lock(&mgr->lock);
+	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
+	mutex_unlock(&mgr->lock);
+	flush_work(&mgr->work);
+	flush_work(&mgr->destroy_connector_work);
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
+
+/**
+ * drm_dp_mst_topology_mgr_resume() - resume the MST manager
+ * @mgr: manager to resume
+ *
+ * This will fetch DPCD and see if the device is still there;
+ * if it is, it will rewrite the MSTM control bits and return.
+ *
+ * If the device fails, this returns -1, and the driver should do
+ * a full MST reprobe, in case we were undocked.
+ */
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int ret = 0;
+
+	mutex_lock(&mgr->lock);
+
+	if (mgr->mst_primary) {
+		int sret;
+		u8 guid[16];
+
+		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+		if (sret != DP_RECEIVER_CAP_SIZE) {
+			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+
+		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+
+		/* Some hubs forget their guids after they resume */
+		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+		if (sret != 16) {
+			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
+		ret = 0;
+	} else
+		ret = -1;
+
+out_unlock:
+	mutex_unlock(&mgr->lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+{
+	int len;
+	u8 replyblock[32];
+	int replylen, origlen, curreply;
+	int ret;
+	struct drm_dp_sideband_msg_rx *msg;
+	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
+	msg = up ?
&mgr->up_req_recv : &mgr->down_rep_recv; + + len = min(mgr->max_dpcd_transaction_bytes, 16); + ret = drm_dp_dpcd_read(mgr->aux, basereg, + replyblock, len); + if (ret != len) { + DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); + return false; + } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); + if (!ret) { + DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); + return false; + } + replylen = msg->curchunk_len + msg->curchunk_hdrlen; + + origlen = replylen; + replylen -= len; + curreply = len; + while (replylen > 0) { + len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); + ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, + replyblock, len); + if (ret != len) { + DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", + len, ret); + return false; + } + + ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); + if (!ret) { + DRM_DEBUG_KMS("failed to build sideband msg\n"); + return false; + } + + curreply += len; + replylen -= len; + } + return true; +} + +static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret = 0; + + if (!drm_dp_get_one_sb_msg(mgr, false)) { + memset(&mgr->down_rep_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + if (mgr->down_rep_recv.have_eomt) { + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + int slot = -1; + mstb = drm_dp_get_mst_branch_device(mgr, + mgr->down_rep_recv.initial_hdr.lct, + mgr->down_rep_recv.initial_hdr.rad); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + /* find the message */ + slot = mgr->down_rep_recv.initial_hdr.seqno; + mutex_lock(&mgr->qlock); + txmsg = mstb->tx_slots[slot]; + /* remove from slots */ + mutex_unlock(&mgr->qlock); + + if (!txmsg) { + DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", + mstb, + mgr->down_rep_recv.initial_hdr.seqno, + mgr->down_rep_recv.initial_hdr.lct, + mgr->down_rep_recv.initial_hdr.rad[0], + mgr->down_rep_recv.msg[0]); + drm_dp_mst_topology_put_mstb(mstb); + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); + + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", + txmsg->reply.req_type, + drm_dp_mst_req_type_str(txmsg->reply.req_type), + txmsg->reply.u.nak.reason, + drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), + txmsg->reply.u.nak.nak_data); + + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + drm_dp_mst_topology_put_mstb(mstb); + + mutex_lock(&mgr->qlock); + txmsg->state = DRM_DP_SIDEBAND_TX_RX; + mstb->tx_slots[slot] = NULL; + mutex_unlock(&mgr->qlock); + + wake_up_all(&mgr->tx_waitq); + } + return ret; +} + +static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret = 0; + + if (!drm_dp_get_one_sb_msg(mgr, true)) { + memset(&mgr->up_req_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + if (mgr->up_req_recv.have_eomt) { + struct drm_dp_sideband_msg_req_body msg; + struct drm_dp_mst_branch *mstb = NULL; + bool seqno; + + if (!mgr->up_req_recv.initial_hdr.broadcast) { + mstb = drm_dp_get_mst_branch_device(mgr, + mgr->up_req_recv.initial_hdr.lct, + mgr->up_req_recv.initial_hdr.rad); + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply 
from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + } + + seqno = mgr->up_req_recv.initial_hdr.seqno; + drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); + + if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); + + if (!mstb) + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + drm_dp_update_port(mstb, &msg.u.conn_stat); + + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); + drm_kms_helper_hotplug_event(mgr->dev); + + } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); + if (!mstb) + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); + } + + if (mstb) + drm_dp_mst_topology_put_mstb(mstb); + + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + } + return ret; +} + +/** + * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify + * @mgr: manager to notify irq for. + * @esi: 4 bytes from SINK_COUNT_ESI + * @handled: whether the hpd interrupt was consumed or not + * + * This should be called from the driver when it detects a short IRQ, + * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The + * topology manager will process the sideband messages received as a result + * of this. + */ +int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) +{ + int ret = 0; + int sc; + *handled = false; + sc = esi[0] & 0x3f; + + if (sc != mgr->sink_count) { + mgr->sink_count = sc; + *handled = true; + } + + if (esi[1] & DP_DOWN_REP_MSG_RDY) { + ret = drm_dp_mst_handle_down_rep(mgr); + *handled = true; + } + + if (esi[1] & DP_UP_REQ_MSG_RDY) { + ret |= drm_dp_mst_handle_up_req(mgr); + *handled = true; + } + + drm_dp_mst_kick_tx(mgr); + return ret; +} +EXPORT_SYMBOL(drm_dp_mst_hpd_irq); + +/** + * drm_dp_mst_detect_port() - get connection status for an MST port + * @connector: DRM connector for this port + * @mgr: manager for this port + * @port: unverified pointer to a port + * + * This returns the current connection state for a port. 
It validates the
+ * port pointer still exists so the caller doesn't require a reference.
+ */
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
+						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	enum drm_connector_status status = connector_status_disconnected;
+
+	/* we need to search for the port in the mgr in case it's gone */
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
+	if (!port)
+		return connector_status_disconnected;
+
+	if (!port->ddps)
+		goto out;
+
+	switch (port->pdt) {
+	case DP_PEER_DEVICE_NONE:
+	case DP_PEER_DEVICE_MST_BRANCHING:
+		break;
+
+	case DP_PEER_DEVICE_SST_SINK:
+		status = connector_status_connected;
+		/* for logical ports - cache the EDID */
+		if (port->port_num >= 8 && !port->cached_edid) {
+			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
+		}
+		break;
+	case DP_PEER_DEVICE_DP_LEGACY_CONV:
+		if (port->ldps)
+			status = connector_status_connected;
+		break;
+	}
+out:
+	drm_dp_mst_topology_put_port(port);
+	return status;
+}
+EXPORT_SYMBOL(drm_dp_mst_detect_port);
+
+/**
+ * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This returns whether the port supports audio or not.
+ */
+bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
+			       struct drm_dp_mst_port *port)
+{
+	bool ret = false;
+
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
+	if (!port)
+		return ret;
+	ret = port->has_audio;
+	drm_dp_mst_topology_put_port(port);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
+
+/**
+ * drm_dp_mst_get_edid() - get EDID for an MST port
+ * @connector: toplevel connector to get EDID for
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This returns an EDID for the port connected to a connector. It validates
+ * the pointer still exists so the caller doesn't require a reference.
+ */
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	struct edid *edid = NULL;
+
+	/* we need to search for the port in the mgr in case it's gone */
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
+	if (!port)
+		return NULL;
+
+	if (port->cached_edid)
+		edid = drm_edid_duplicate(port->cached_edid);
+	else {
+		edid = drm_get_edid(connector, &port->aux.ddc);
+	}
+	port->has_audio = drm_detect_monitor_audio(edid);
+	drm_dp_mst_topology_put_port(port);
+	return edid;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_edid);
+
+/**
+ * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
+ * @mgr: manager to use
+ * @pbn: payload bandwidth to convert into slots.
+ *
+ * Calculate the number of VCPI slots that will be required for the given PBN
+ * value. This function is deprecated, and should not be used in atomic
+ * drivers.
+ *
+ * RETURNS:
+ * The total slots required for this port, or error.
+ */
+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
+			   int pbn)
+{
+	int num_slots;
+
+	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+	/* max. time slots - one slot for MTP header */
+	if (num_slots > 63)
+		return -ENOSPC;
+	return num_slots;
+}
+EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
+
+static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
+{
+	int ret;
+
+	/* max. time slots - one slot for MTP header */
+	if (slots > 63)
+		return -ENOSPC;
+
+	vcpi->pbn = pbn;
+	vcpi->aligned_pbn = slots * mgr->pbn_div;
+	vcpi->num_slots = slots;
+
+	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+/**
+ * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @port: port to find vcpi slots for
+ * @pbn: bandwidth required for the mode in PBN
+ *
+ * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
+ * may have had. Any atomic drivers which support MST must call this function
+ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
+ * current VCPI allocation for the new state, but only when
+ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
+ * to ensure compatibility with userspace applications that still use the
+ * legacy modesetting UAPI.
+ *
+ * Allocations set by this function are not checked against the bandwidth
+ * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
+ *
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
+ *
+ * See also:
+ * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * Total slots in the atomic state assigned for this port, or a negative error
+ * code if the port no longer exists
+ */
+int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+				  struct drm_dp_mst_topology_mgr *mgr,
+				  struct drm_dp_mst_port *port, int pbn)
+{
+	struct drm_dp_mst_topology_state *topology_state;
+	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
+	int prev_slots, req_slots, ret;
+
+	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+	if (IS_ERR(topology_state))
+		return PTR_ERR(topology_state);
+
+	/* Find the current allocation for this port, if any */
+	list_for_each_entry(pos, &topology_state->vcpis, next) {
+		if (pos->port == port) {
+			vcpi = pos;
+			prev_slots = vcpi->vcpi;
+
+			/*
+			 * This should never happen, unless the driver tries
+			 * releasing and allocating the same VCPI allocation,
+			 * which is an error
+			 */
+			if (WARN_ON(!prev_slots)) {
+				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
+					  port);
+				return -EINVAL;
+			}
+
+			break;
+		}
+	}
+	if (!vcpi)
+		prev_slots = 0;
+
+	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+			 port->connector->base.id, port->connector->name,
+			 port, prev_slots, req_slots);
+
+	/* Add the new allocation to the state */
+	if (!vcpi) {
+		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
+		if (!vcpi)
+			return -ENOMEM;
+
+		drm_dp_mst_get_port_malloc(port);
+		vcpi->port = port;
+		list_add(&vcpi->next, &topology_state->vcpis);
+	}
+	vcpi->vcpi = req_slots;
+
+	ret = req_slots;
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
+
+/**
+ * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @port: The port to release the VCPI slots from
+ *
+ * Releases any VCPI slots that have been allocated to a port in the atomic
+ * state.
+ * Any atomic drivers which support MST must call this function in their
+ * &drm_connector_helper_funcs.atomic_check() callback when the connector will
+ * no longer have VCPI allocated (e.g. because its CRTC was removed) when it
+ * had VCPI allocated in the previous atomic state.
+ *
+ * It is OK to call this even if @port has been removed from the system.
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
+ * phase.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * 0 if all slots for this port were added back to
+ * &drm_dp_mst_topology_state.avail_slots or negative error code
+ */
+int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+				     struct drm_dp_mst_topology_mgr *mgr,
+				     struct drm_dp_mst_port *port)
+{
+	struct drm_dp_mst_topology_state *topology_state;
+	struct drm_dp_vcpi_allocation *pos;
+	bool found = false;
+
+	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+	if (IS_ERR(topology_state))
+		return PTR_ERR(topology_state);
+
+	list_for_each_entry(pos, &topology_state->vcpis, next) {
+		if (pos->port == port) {
+			found = true;
+			break;
+		}
+	}
+	if (WARN_ON(!found)) {
+		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
+			  port, &topology_state->base);
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
+	if (pos->vcpi) {
+		drm_dp_mst_put_port_malloc(port);
+		pos->vcpi = 0;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
+
+/**
+ * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
+ * @mgr: manager for this port
+ * @port: port to allocate a virtual channel for.
+ * @pbn: payload bandwidth number to request
+ * @slots: returned number of slots for this PBN.
+ */
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+			      struct drm_dp_mst_port *port, int pbn, int slots)
+{
+	int ret;
+
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
+	if (!port)
+		return false;
+
+	if (slots < 0)
+		return false;
+
+	if (port->vcpi.vcpi > 0) {
+		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
+			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
+		if (pbn == port->vcpi.pbn) {
+			drm_dp_mst_topology_put_port(port);
+			return true;
+		}
+	}
+
+	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
+	if (ret) {
+		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
+			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+		goto out;
+	}
+	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
+		      pbn, port->vcpi.num_slots);
+
+	/* Keep port allocated until its payload has been removed */
+	drm_dp_mst_get_port_malloc(port);
+	drm_dp_mst_topology_put_port(port);
+	return true;
+out:
+	return false;
+}
+EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
+
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	int slots = 0;
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
+	if (!port)
+		return slots;
+
+	slots = port->vcpi.num_slots;
+	drm_dp_mst_topology_put_port(port);
+	return slots;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
+
+/**
+ * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This just resets the number of slots for the port's VCPI for later
+ * programming.
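+ *
+ * As a sketch, the disable path this is typically part of (assuming the
+ * enable path used drm_dp_mst_allocate_vcpi()) might read:
+ *
+ * .. code-block:: c
+ *
+ *	drm_dp_mst_reset_vcpi_slots(mgr, port);
+ *	drm_dp_update_payload_part1(mgr);
+ *	// ... disable the stream and wait for ACT ...
+ *	drm_dp_check_act_status(mgr);
+ *	drm_dp_update_payload_part2(mgr);
+ *	drm_dp_mst_deallocate_vcpi(mgr, port);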
+ */ +void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + /* + * A port with VCPI will remain allocated until its VCPI is + * released, no verified ref needed + */ + + port->vcpi.num_slots = 0; +} +EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); + +/** + * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI + * @mgr: manager for this port + * @port: port to deallocate vcpi for + * + * This can be called unconditionally, regardless of whether + * drm_dp_mst_allocate_vcpi() succeeded or not. + */ +void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + if (!port->vcpi.vcpi) + return; + + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); + port->vcpi.num_slots = 0; + port->vcpi.pbn = 0; + port->vcpi.aligned_pbn = 0; + port->vcpi.vcpi = 0; + drm_dp_mst_put_port_malloc(port); +} +EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); + +static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, + int id, struct drm_dp_payload *payload) +{ + u8 payload_alloc[3], status; + int ret; + int retries = 0; + + drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, + DP_PAYLOAD_TABLE_UPDATED); + + payload_alloc[0] = id; + payload_alloc[1] = payload->start_slot; + payload_alloc[2] = payload->num_slots; + + ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); + if (ret != 3) { + DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret); + goto fail; + } + +retry: + ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + if (ret < 0) { + DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); + goto fail; + } + + if (!(status & DP_PAYLOAD_TABLE_UPDATED)) { + retries++; + if (retries < 20) { + usleep_range(10000, 20000); + goto retry; + } + DRM_DEBUG_KMS("status not set after read payload table status %d\n", status); + ret = -EINVAL; + goto fail; + } + ret = 0; +fail: + return ret; +} + + +/** + * drm_dp_check_act_status() - Check ACT handled status. + * @mgr: manager to use + * + * Check the payload status bits in the DPCD for ACT handled completion. + */ +int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) +{ + u8 status; + int ret; + int count = 0; + + do { + ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + + if (ret < 0) { + DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); + goto fail; + } + + if (status & DP_PAYLOAD_ACT_HANDLED) + break; + count++; + udelay(100); + + } while (count < 30); + + if (!(status & DP_PAYLOAD_ACT_HANDLED)) { + DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); + ret = -EINVAL; + goto fail; + } + return 0; +fail: + return ret; +} +EXPORT_SYMBOL(drm_dp_check_act_status); + +/** + * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. + * @clock: dot clock for the mode + * @bpp: bpp for the mode. + * + * This uses the formula in the spec to calculate the PBN value for a mode. 
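+ *
+ * As a worked example, a 154000 kHz dot clock at 30 bpp gives
+ * ceil(154000 * 30 * 64 * 1006 / (54 * 8 * 1000 * 1000)) = 689 PBN,
+ * which is the first case exercised by the self-test below.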
+ */
+int drm_dp_calc_pbn_mode(int clock, int bpp)
+{
+	u64 kbps;
+	s64 peak_kbps;
+	u32 numerator;
+	u32 denominator;
+
+	kbps = clock * bpp;
+
+	/*
+	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+	 * common multiplier to render an integer PBN for all link rate/lane
+	 * counts combinations
+	 * calculate
+	 * peak_kbps *= (1006/1000)
+	 * peak_kbps *= (64/54)
+	 * peak_kbps *= 8    convert to bytes
+	 */
+
+	numerator = 64 * 1006;
+	denominator = 54 * 8 * 1000 * 1000;
+
+	kbps *= numerator;
+	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+	return drm_fixp2int_ceil(peak_kbps);
+}
+EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
+
+static int test_calc_pbn_mode(void)
+{
+	int ret;
+	ret = drm_dp_calc_pbn_mode(154000, 30);
+	if (ret != 689) {
+		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+			  154000, 30, 689, ret);
+		return -EINVAL;
+	}
+	ret = drm_dp_calc_pbn_mode(234000, 30);
+	if (ret != 1047) {
+		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+			  234000, 30, 1047, ret);
+		return -EINVAL;
+	}
+	ret = drm_dp_calc_pbn_mode(297000, 24);
+	if (ret != 1063) {
+		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+			  297000, 24, 1063, ret);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* we want to kick the TX after we've acked the up/down IRQs. */
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
+{
+	queue_work(system_long_wq, &mgr->tx_work);
+}
+
+static void drm_dp_mst_dump_mstb(struct seq_file *m,
+				 struct drm_dp_mst_branch *mstb)
+{
+	struct drm_dp_mst_port *port;
+	int tabs = mstb->lct;
+	char prefix[10];
+	int i;
+
+	for (i = 0; i < tabs; i++)
+		prefix[i] = '\t';
+	prefix[i] = '\0';
+
+	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
+	list_for_each_entry(port, &mstb->ports, next) {
+		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
+		if (port->mstb)
+			drm_dp_mst_dump_mstb(m, port->mstb);
+	}
+}
+
+#define DP_PAYLOAD_TABLE_SIZE	64
+
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+				  char *buf)
+{
+	int i;
+
+	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
+		if (drm_dp_dpcd_read(mgr->aux,
+				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+				     &buf[i], 16) != 16)
+			return false;
+	}
+	return true;
+}
+
+static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
+			       struct drm_dp_mst_port *port, char *name,
+			       int namelen)
+{
+	struct edid *mst_edid;
+
+	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
+	drm_edid_get_monitor_name(mst_edid, name, namelen);
+}
+
+/**
+ * drm_dp_mst_dump_topology(): dump topology to seq file.
+ * @m: seq_file to dump output to
+ * @mgr: manager to dump current topology for.
+ *
+ * helper to dump MST topology to a seq file for debugfs.
+ */ +void drm_dp_mst_dump_topology(struct seq_file *m, + struct drm_dp_mst_topology_mgr *mgr) +{ + int i; + struct drm_dp_mst_port *port; + + mutex_lock(&mgr->lock); + if (mgr->mst_primary) + drm_dp_mst_dump_mstb(m, mgr->mst_primary); + + /* dump VCPIs */ + mutex_unlock(&mgr->lock); + + mutex_lock(&mgr->payload_lock); + seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask, + mgr->max_payloads); + + for (i = 0; i < mgr->max_payloads; i++) { + if (mgr->proposed_vcpis[i]) { + char name[14]; + + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + fetch_monitor_name(mgr, port, name, sizeof(name)); + seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i, + port->port_num, port->vcpi.vcpi, + port->vcpi.num_slots, + (*name != 0) ? name : "Unknown"); + } else + seq_printf(m, "vcpi %d:unused\n", i); + } + for (i = 0; i < mgr->max_payloads; i++) { + seq_printf(m, "payload %d: %d, %d, %d\n", + i, + mgr->payloads[i].payload_state, + mgr->payloads[i].start_slot, + mgr->payloads[i].num_slots); + + + } + mutex_unlock(&mgr->payload_lock); + + mutex_lock(&mgr->lock); + if (mgr->mst_primary) { + u8 buf[DP_PAYLOAD_TABLE_SIZE]; + int ret; + + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); + seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); + ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); + seq_printf(m, "faux/mst: %*ph\n", 2, buf); + ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); + seq_printf(m, "mst ctrl: %*ph\n", 1, buf); + + /* dump the standard OUI branch header */ + ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); + seq_printf(m, "branch oui: %*phN devid: ", 3, buf); + for (i = 0x3; i < 0x8 && buf[i]; i++) + seq_printf(m, "%c", buf[i]); + seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", + buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); + if (dump_dp_payload_table(mgr, buf)) + seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf); + } + + mutex_unlock(&mgr->lock); + +} +EXPORT_SYMBOL(drm_dp_mst_dump_topology); + +static void drm_dp_tx_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); + + mutex_lock(&mgr->qlock); + if (!list_empty(&mgr->tx_msg_downq)) + process_single_down_tx_qlock(mgr); + mutex_unlock(&mgr->qlock); +} + +static void drm_dp_destroy_connector_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); + struct drm_dp_mst_port *port; + bool send_hotplug = false; + /* + * Not a regular list traverse as we have to drop the destroy + * connector lock before destroying the connector, to avoid AB->BA + * ordering between this lock and the config mutex. 
+ */
+	for (;;) {
+		mutex_lock(&mgr->destroy_connector_lock);
+		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+		if (!port) {
+			mutex_unlock(&mgr->destroy_connector_lock);
+			break;
+		}
+		list_del(&port->next);
+		mutex_unlock(&mgr->destroy_connector_lock);
+
+		INIT_LIST_HEAD(&port->next);
+
+		mgr->cbs->destroy_connector(mgr, port->connector);
+
+		drm_dp_port_teardown_pdt(port, port->pdt);
+		port->pdt = DP_PEER_DEVICE_NONE;
+
+		drm_dp_mst_put_port_malloc(port);
+		send_hotplug = true;
+	}
+	if (send_hotplug)
+		drm_kms_helper_hotplug_event(mgr->dev);
+}
+
+static struct drm_private_state *
+drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
+{
+	struct drm_dp_mst_topology_state *state, *old_state =
+		to_dp_mst_topology_state(obj->state);
+	struct drm_dp_vcpi_allocation *pos, *vcpi;
+
+	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+	INIT_LIST_HEAD(&state->vcpis);
+
+	list_for_each_entry(pos, &old_state->vcpis, next) {
+		/* Prune leftover freed VCPI allocations */
+		if (!pos->vcpi)
+			continue;
+
+		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
+		if (!vcpi)
+			goto fail;
+
+		drm_dp_mst_get_port_malloc(vcpi->port);
+		list_add(&vcpi->next, &state->vcpis);
+	}
+
+	return &state->base;
+
+fail:
+	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
+		drm_dp_mst_put_port_malloc(pos->port);
+		kfree(pos);
+	}
+	kfree(state);
+
+	return NULL;
+}
+
+static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
+				     struct drm_private_state *state)
+{
+	struct drm_dp_mst_topology_state *mst_state =
+		to_dp_mst_topology_state(state);
+	struct drm_dp_vcpi_allocation *pos, *tmp;
+
+	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
+		/* We only keep references to ports with non-zero VCPIs */
+		if (pos->vcpi)
+			drm_dp_mst_put_port_malloc(pos->port);
+		kfree(pos);
+	}
+
+	kfree(mst_state);
+}
+
+static inline int
+drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
+				       struct drm_dp_mst_topology_state *mst_state)
+{
+	struct drm_dp_vcpi_allocation *vcpi;
+	int avail_slots = 63, payload_count = 0;
+
+	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+		/* Releasing VCPI is always OK, even if the port is gone */
+		if (!vcpi->vcpi) {
+			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
+					 vcpi->port);
+			continue;
+		}
+
+		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
+				 vcpi->port, vcpi->vcpi);
+
+		avail_slots -= vcpi->vcpi;
+		if (avail_slots < 0) {
+			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
+					 vcpi->port, mst_state,
+					 avail_slots + vcpi->vcpi);
+			return -ENOSPC;
+		}
+
+		if (++payload_count > mgr->max_payloads) {
+			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
+					 mgr, mst_state, mgr->max_payloads);
+			return -EINVAL;
+		}
+	}
+	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
+			 mgr, mst_state, avail_slots,
+			 63 - avail_slots);
+
+	return 0;
+}
+
+/**
+ * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
+ * atomic update is valid
+ * @state: Pointer to the new &struct drm_dp_mst_topology_state
+ *
+ * Checks the given topology state for an atomic update to ensure that it's
+ * valid. This includes checking whether there's enough bandwidth to support
+ * the new VCPI allocations in the atomic update.
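+ *
+ * As an illustrative sketch (the driver callback name is hypothetical), the
+ * call usually sits at the end of the driver's atomic_check:
+ *
+ * .. code-block:: c
+ *
+ *	static int driver_atomic_check(struct drm_device *dev,
+ *				       struct drm_atomic_state *state)
+ *	{
+ *		int ret;
+ *
+ *		ret = drm_atomic_helper_check(dev, state);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return drm_dp_mst_atomic_check(state);
+ *	}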
+ *
+ * Any atomic drivers supporting DP MST must make sure to call this after
+ * checking the rest of their state in their
+ * &drm_mode_config_funcs.atomic_check() callback.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_atomic_release_vcpi_slots()
+ *
+ * Returns:
+ *
+ * 0 if the new state is valid, negative error code otherwise.
+ */
+int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
+{
+	struct drm_dp_mst_topology_mgr *mgr;
+	struct drm_dp_mst_topology_state *mst_state;
+	int i, ret = 0;
+
+	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_check);
+
+const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
+	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
+	.atomic_destroy_state = drm_dp_mst_destroy_state,
+};
+EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
+
+/**
+ * drm_atomic_get_mst_topology_state: get MST topology state
+ *
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_private_obj_state() passing in the MST
+ * atomic state vtable so that the private object state returned is that of a
+ * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
+ * caller to take care of the locking, so we warn if the connection_mutex is
+ * not held.
+ *
+ * RETURNS:
+ *
+ * The MST topology state or error pointer.
+ */
+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+								    struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_device *dev = mgr->dev;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
+}
+EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+
+/**
+ * drm_dp_mst_topology_mgr_init - initialise a topology manager
+ * @mgr: manager struct to initialise
+ * @dev: device providing this structure - for i2c addition.
+ * @aux: DP helper aux channel to talk to this device
+ * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
+ * @max_payloads: maximum number of payloads this GPU can source
+ * @conn_base_id: the connector object ID the MST device is connected to.
+ * + * Return 0 for success, or negative error code on failure + */ +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, + struct drm_device *dev, struct drm_dp_aux *aux, + int max_dpcd_transaction_bytes, + int max_payloads, int conn_base_id) +{ + struct drm_dp_mst_topology_state *mst_state; + + mutex_init(&mgr->lock); + mutex_init(&mgr->qlock); + mutex_init(&mgr->payload_lock); + mutex_init(&mgr->destroy_connector_lock); + INIT_LIST_HEAD(&mgr->tx_msg_downq); + INIT_LIST_HEAD(&mgr->destroy_connector_list); + INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); + INIT_WORK(&mgr->tx_work, drm_dp_tx_work); + INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); + init_waitqueue_head(&mgr->tx_waitq); + mgr->dev = dev; + mgr->aux = aux; + mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; + mgr->max_payloads = max_payloads; + mgr->conn_base_id = conn_base_id; + if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || + max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) + return -EINVAL; + mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); + if (!mgr->payloads) + return -ENOMEM; + mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); + if (!mgr->proposed_vcpis) + return -ENOMEM; + set_bit(0, &mgr->payload_mask); + if (test_calc_pbn_mode() < 0) + DRM_ERROR("MST PBN self-test failed\n"); + + mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); + if (mst_state == NULL) + return -ENOMEM; + + mst_state->mgr = mgr; + INIT_LIST_HEAD(&mst_state->vcpis); + + drm_atomic_private_obj_init(dev, &mgr->base, + &mst_state->base, + &drm_dp_mst_topology_state_funcs); + + return 0; +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); + +/** + * drm_dp_mst_topology_mgr_destroy() - destroy topology manager. 
+ * @mgr: manager to destroy + */ +void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) +{ + drm_dp_mst_topology_mgr_set_mst(mgr, false); + flush_work(&mgr->work); + flush_work(&mgr->destroy_connector_work); + mutex_lock(&mgr->payload_lock); + kfree(mgr->payloads); + mgr->payloads = NULL; + kfree(mgr->proposed_vcpis); + mgr->proposed_vcpis = NULL; + mutex_unlock(&mgr->payload_lock); + mgr->dev = NULL; + mgr->aux = NULL; + drm_atomic_private_obj_fini(&mgr->base); + mgr->funcs = NULL; +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); + +static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num) +{ + int i; + + if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS) + return false; + + for (i = 0; i < num - 1; i++) { + if (msgs[i].flags & I2C_M_RD || + msgs[i].len > 0xff) + return false; + } + + return msgs[num - 1].flags & I2C_M_RD && + msgs[num - 1].len <= 0xff; +} + +/* I2C device */ +static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, + int num) +{ + struct drm_dp_aux *aux = adapter->algo_data; + struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); + struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + unsigned int i; + struct drm_dp_sideband_msg_req_body msg; + struct drm_dp_sideband_msg_tx *txmsg = NULL; + int ret; + + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); + if (!mstb) + return -EREMOTEIO; + + if (!remote_i2c_read_ok(msgs, num)) { + DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); + ret = -EIO; + goto out; + } + + memset(&msg, 0, sizeof(msg)); + msg.req_type = DP_REMOTE_I2C_READ; + msg.u.i2c_read.num_transactions = num - 1; + msg.u.i2c_read.port_number = port->port_num; + for (i = 0; i < num - 1; i++) { + msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; + msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; + msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; + msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP); + } + msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; + msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto out; + } + + txmsg->dst = mstb; + drm_dp_encode_sideband_req(&msg, txmsg); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { + ret = -EREMOTEIO; + goto out; + } + if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { + ret = -EIO; + goto out; + } + memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); + ret = num; + } +out: + kfree(txmsg); + drm_dp_mst_topology_put_mstb(mstb); + return ret; +} + +static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +static const struct i2c_algorithm drm_dp_mst_i2c_algo = { + .functionality = drm_dp_mst_i2c_functionality, + .master_xfer = drm_dp_mst_i2c_xfer, +}; + +/** + * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX + * @aux: DisplayPort AUX channel + * + * Returns 0 on success or a negative error code on failure. 
+ */ +static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux) +{ + aux->ddc.algo = &drm_dp_mst_i2c_algo; + aux->ddc.algo_data = aux; + aux->ddc.retries = 3; + + aux->ddc.class = I2C_CLASS_DDC; + aux->ddc.owner = THIS_MODULE; + aux->ddc.dev.parent = aux->dev; + aux->ddc.dev.of_node = aux->dev->of_node; + + strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), + sizeof(aux->ddc.name)); + + return i2c_add_adapter(&aux->ddc); +} + +/** + * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter + * @aux: DisplayPort AUX channel + */ +static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux) +{ + i2c_del_adapter(&aux->ddc); +} Index: sys/dev/drm/core/drm_drv.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_drv.c @@ -0,0 +1,1201 @@ +/* + * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org + * + * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Author Rickard E. (Rik) Faith + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "drm_crtc_internal.h" +#include "drm_internal.h" +#include "drm_legacy.h" + +/* + * drm_debug: Enable debug output. + * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details. 
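+ *
+ * For example, debug=0x14 (DRM_UT_KMS | DRM_UT_ATOMIC) enables the
+ * modesetting and atomic categories; the full bit assignments are listed in
+ * the module parameter description below.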
+ */ +unsigned int drm_debug = 0; +EXPORT_SYMBOL(drm_debug); + +MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); +MODULE_DESCRIPTION("DRM shared core routines"); +MODULE_LICENSE("GPL and additional rights"); +MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n" +"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n" +"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n" +"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n" +"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n" +"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n" +"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n" +"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n" +"\t\tBit 8 (0x100) will enable DP messages (displayport code)"); +module_param_named(debug, drm_debug, int, 0600); + +static DEFINE_SPINLOCK(drm_minor_lock); +static struct idr drm_minors_idr; + +/* + * If the drm core fails to init for whatever reason, + * we should prevent any drivers from registering with it. + * It's best to check this at drm_dev_init(), as some drivers + * prefer to embed struct drm_device into their own device + * structure and call drm_dev_init() themselves. + */ +static bool drm_core_init_complete = false; + +static struct dentry *drm_debugfs_root; + +DEFINE_STATIC_SRCU(drm_unplug_srcu); + +/* + * DRM Minors + * A DRM device can provide several char-dev interfaces on the DRM-Major. Each + * of them is represented by a drm_minor object. Depending on the capabilities + * of the device-driver, different interfaces are registered. + * + * Minors can be accessed via dev->$minor_name. This pointer is either + * NULL or a valid drm_minor pointer and stays valid as long as the device is + * valid. This means, DRM minors have the same life-time as the underlying + * device. However, this doesn't mean that the minor is active. Minors are + * registered and unregistered dynamically according to device-state. 
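+ *
+ * For example, each minor type is allocated a 64-wide index range
+ * (64 * type up to 64 * (type + 1)), as handed out by drm_minor_alloc()
+ * below; the index identifies the char-dev node for that interface.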
+ */ + +static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, + unsigned int type) +{ + switch (type) { + case DRM_MINOR_PRIMARY: + return &dev->primary; + case DRM_MINOR_RENDER: + return &dev->render; + default: + BUG(); + } +} + +static int drm_minor_alloc(struct drm_device *dev, unsigned int type) +{ + struct drm_minor *minor; + unsigned long flags; + int r; + + minor = kzalloc(sizeof(*minor), GFP_KERNEL); + if (!minor) + return -ENOMEM; + + minor->type = type; + minor->dev = dev; + + idr_preload(GFP_KERNEL); + spin_lock_irqsave(&drm_minor_lock, flags); + r = idr_alloc(&drm_minors_idr, + NULL, + 64 * type, + 64 * (type + 1), + GFP_NOWAIT); + spin_unlock_irqrestore(&drm_minor_lock, flags); + idr_preload_end(); + + if (r < 0) + goto err_free; + + minor->index = r; + + minor->kdev = drm_sysfs_minor_alloc(minor); + if (IS_ERR(minor->kdev)) { + r = PTR_ERR(minor->kdev); + goto err_index; + } + + *drm_minor_get_slot(dev, type) = minor; + return 0; + +err_index: + spin_lock_irqsave(&drm_minor_lock, flags); + idr_remove(&drm_minors_idr, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); +err_free: + kfree(minor); + return r; +} + +static void drm_minor_free(struct drm_device *dev, unsigned int type) +{ + struct drm_minor **slot, *minor; + unsigned long flags; + + slot = drm_minor_get_slot(dev, type); + minor = *slot; + if (!minor) + return; + + put_device(minor->kdev); + + spin_lock_irqsave(&drm_minor_lock, flags); + idr_remove(&drm_minors_idr, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); + + kfree(minor); + *slot = NULL; +} + +static int drm_minor_register(struct drm_device *dev, unsigned int type) +{ + struct drm_minor *minor; + unsigned long flags; + int ret; + + DRM_DEBUG("\n"); + + minor = *drm_minor_get_slot(dev, type); + if (!minor) + return 0; + + ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root); + if (ret) { + DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); + goto err_debugfs; + } + +#ifdef __linux__ + ret = device_add(minor->kdev); +#elif defined(__FreeBSD__) + ret = drm_fbsd_cdev_create(minor); +#endif + if (ret) + goto err_debugfs; + + /* replace NULL with @minor so lookups will succeed from now on */ + spin_lock_irqsave(&drm_minor_lock, flags); + idr_replace(&drm_minors_idr, minor, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); + + DRM_DEBUG("new minor registered %d\n", minor->index); + return 0; + +err_debugfs: + drm_debugfs_cleanup(minor); + return ret; +} + +static void drm_minor_unregister(struct drm_device *dev, unsigned int type) +{ + struct drm_minor *minor; + unsigned long flags; + + minor = *drm_minor_get_slot(dev, type); + if (!minor || !device_is_registered(minor->kdev)) + return; + + /* replace @minor with NULL so lookups will fail from now on */ + spin_lock_irqsave(&drm_minor_lock, flags); + idr_replace(&drm_minors_idr, NULL, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); + +#ifdef __linux__ + device_del(minor->kdev); + dev_set_drvdata(minor->kdev, NULL); /* safety belt */ +#elif defined(__FreeBSD__) + drm_fbsd_cdev_delete(minor); +#endif + drm_debugfs_cleanup(minor); +} + +/* + * Looks up the given minor-ID and returns the respective DRM-minor object. The + * reference-count of the underlying device is increased so you must release this + * object with drm_minor_release(). + * + * As long as you hold this minor, it is guaranteed that the object and the + * minor->dev pointer will stay valid!
However, the device may get unplugged and + * unregistered while you hold the minor. + */ +struct drm_minor *drm_minor_acquire(unsigned int minor_id) +{ + struct drm_minor *minor; + unsigned long flags; + + spin_lock_irqsave(&drm_minor_lock, flags); + minor = idr_find(&drm_minors_idr, minor_id); + if (minor) + drm_dev_get(minor->dev); + spin_unlock_irqrestore(&drm_minor_lock, flags); + + if (!minor) { + return ERR_PTR(-ENODEV); + } else if (drm_dev_is_unplugged(minor->dev)) { + drm_dev_put(minor->dev); + return ERR_PTR(-ENODEV); + } + + return minor; +} + +void drm_minor_release(struct drm_minor *minor) +{ + drm_dev_put(minor->dev); +} + +/** + * DOC: driver instance overview + * + * A device instance for a drm driver is represented by &struct drm_device. This + * is initialized with drm_dev_init(), usually from bus-specific ->probe() + * callbacks implemented by the driver. The driver then needs to initialize all + * the various subsystems for the drm device like memory management, vblank + * handling, modesetting support and initial output configuration plus obviously + * initialize all the corresponding hardware bits. Finally when everything is up + * and running and ready for userspace the device instance can be published + * using drm_dev_register(). + * + * There is also deprecated support for initializing device instances using + * bus-specific helpers and the &drm_driver.load callback. But due to + * backwards-compatibility needs the device instance has to be published too + * early, which requires unpretty global locking to make safe and is therefore + * only supported for existing drivers not yet converted to the new scheme. + * + * When cleaning up a device instance everything needs to be done in reverse: + * First unpublish the device instance with drm_dev_unregister(). Then clean up + * any other resources allocated at device initialization and drop the driver's + * reference to &drm_device using drm_dev_put(). + * + * Note that the lifetime rules for &drm_device instances still carry a lot of + * historical baggage. Hence use the reference counting provided by + * drm_dev_get() and drm_dev_put() only carefully. + * + * Display driver example + * ~~~~~~~~~~~~~~~~~~~~~~ + * + * The following example shows a typical structure of a DRM display driver. + * The example focuses on the probe() function and the other functions that are + * almost always present; it serves as a demonstration of devm_drm_dev_init() + * usage with its accompanying drm_driver->release callback. + * + * .. code-block:: c + * + * struct driver_device { + * struct drm_device drm; + * void *userspace_facing; + * struct clk *pclk; + * }; + * + * static void driver_drm_release(struct drm_device *drm) + * { + * struct driver_device *priv = container_of(...); + * + * drm_mode_config_cleanup(drm); + * drm_dev_fini(drm); + * kfree(priv->userspace_facing); + * kfree(priv); + * } + * + * static struct drm_driver driver_drm_driver = { + * [...] + * .release = driver_drm_release, + * }; + * + * static int driver_probe(struct platform_device *pdev) + * { + * struct driver_device *priv; + * struct drm_device *drm; + * int ret; + * + * // devm_kzalloc() can't be used here because the drm_device + * // lifetime can exceed the device lifetime if driver unbind + * // happens when userspace still has open file descriptors.
+ * priv = kzalloc(sizeof(*priv), GFP_KERNEL); + * if (!priv) + * return -ENOMEM; + * + * drm = &priv->drm; + * + * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver); + * if (ret) { + * kfree(priv); + * return ret; + * } + * + * drm_mode_config_init(drm); + * + * priv->userspace_facing = kzalloc(..., GFP_KERNEL); + * if (!priv->userspace_facing) + * return -ENOMEM; + * + * priv->pclk = devm_clk_get(&pdev->dev, "PCLK"); + * if (IS_ERR(priv->pclk)) + * return PTR_ERR(priv->pclk); + * + * // Further setup, display pipeline etc + * + * platform_set_drvdata(pdev, drm); + * + * drm_mode_config_reset(drm); + * + * ret = drm_dev_register(drm, 0); + * if (ret) + * return ret; + * + * drm_fbdev_generic_setup(drm, 32); + * + * return 0; + * } + * + * // This function is called before the devm_ resources are released + * static int driver_remove(struct platform_device *pdev) + * { + * struct drm_device *drm = platform_get_drvdata(pdev); + * + * drm_dev_unregister(drm); + * drm_atomic_helper_shutdown(drm); + * + * return 0; + * } + * + * // This function is called on kernel restart and shutdown + * static void driver_shutdown(struct platform_device *pdev) + * { + * drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); + * } + * + * static int __maybe_unused driver_pm_suspend(struct device *dev) + * { + * return drm_mode_config_helper_suspend(dev_get_drvdata(dev)); + * } + * + * static int __maybe_unused driver_pm_resume(struct device *dev) + * { + * drm_mode_config_helper_resume(dev_get_drvdata(dev)); + * + * return 0; + * } + * + * static const struct dev_pm_ops driver_pm_ops = { + * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume) + * }; + * + * static struct platform_driver driver_driver = { + * .driver = { + * [...] + * .pm = &driver_pm_ops, + * }, + * .probe = driver_probe, + * .remove = driver_remove, + * .shutdown = driver_shutdown, + * }; + * module_platform_driver(driver_driver); + * + * Drivers that want to support device unplugging (USB, DT overlay unload) should + * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect + * regions that access device resources to prevent use after they're + * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one + * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before + * drm_atomic_helper_shutdown() is called. This means that if the disable code + * paths are protected, they will not run on regular driver module unload, + * possibly leaving the hardware enabled. + */ + +/** + * drm_put_dev - Unregister and release a DRM device + * @dev: DRM device + * + * Called at module unload time or when a PCI device is unplugged. + * + * Cleans up the DRM device, calling drm_lastclose(). + * + * Note: Use of this function is deprecated. It will eventually go away + * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly + * instead to make sure that the device isn't userspace accessible any more + * while teardown is in progress, ensuring that userspace can't access an + * inconsistent state.
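+ * + * A minimal sketch of that explicit sequence: + * + * .. code-block:: c + * + * drm_dev_unregister(drm); + * // driver-specific teardown that still needs the device goes here + * drm_dev_put(drm);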
+ */ +void drm_put_dev(struct drm_device *dev) +{ + DRM_DEBUG("\n"); + + if (!dev) { + DRM_ERROR("cleanup called no dev\n"); + return; + } + + drm_dev_unregister(dev); + drm_dev_put(dev); +} +EXPORT_SYMBOL(drm_put_dev); + +/** + * drm_dev_enter - Enter device critical section + * @dev: DRM device + * @idx: Pointer to index that will be passed to the matching drm_dev_exit() + * + * This function marks and protects the beginning of a section that should not + * be entered after the device has been unplugged. The section end is marked + * with drm_dev_exit(). Calls to this function can be nested. + * + * Returns: + * True if it is OK to enter the section, false otherwise. + */ +bool drm_dev_enter(struct drm_device *dev, int *idx) +{ + *idx = srcu_read_lock(&drm_unplug_srcu); + + if (dev->unplugged) { + srcu_read_unlock(&drm_unplug_srcu, *idx); + return false; + } + + return true; +} +EXPORT_SYMBOL(drm_dev_enter); + +/** + * drm_dev_exit - Exit device critical section + * @idx: index returned from drm_dev_enter() + * + * This function marks the end of a section that should not be entered after + * the device has been unplugged. + */ +void drm_dev_exit(int idx) +{ + srcu_read_unlock(&drm_unplug_srcu, idx); +} +EXPORT_SYMBOL(drm_dev_exit); + +/** + * drm_dev_unplug - unplug a DRM device + * @dev: DRM device + * + * This unplugs a hotpluggable DRM device, which makes it inaccessible to + * userspace operations. Entry-points can use drm_dev_enter() and + * drm_dev_exit() to protect device resources in a race free manner. This + * essentially unregisters the device like drm_dev_unregister(), but can be + * called while there are still open users of @dev. + */ +void drm_dev_unplug(struct drm_device *dev) +{ + /* + * After synchronizing any critical read section is guaranteed to see + * the new value of ->unplugged, and any critical section which might + * still have seen the old value of ->unplugged is guaranteed to have + * finished. + */ + dev->unplugged = true; + synchronize_srcu(&drm_unplug_srcu); + + drm_dev_unregister(dev); +} +EXPORT_SYMBOL(drm_dev_unplug); + +#ifdef __linux__ +/* + * DRM internal mount + * We want to be able to allocate our own "struct address_space" to control + * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow + * stand-alone address_space objects, so we need an underlying inode. As there + * is no way to allocate an independent inode easily, we need a fake internal + * VFS mount-point. + * + * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free() + * frees it again. You are allowed to use iget() and iput() to get references to + * the inode. But each drm_fs_inode_new() call must be paired with exactly one + * drm_fs_inode_free() call (which does not have to be the last iput()). + * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it + * between multiple inode-users. You could, technically, call + * iget() + drm_fs_inode_free() directly after alloc and sometime later do an + * iput(), but this way you'd end up with a new vfsmount for each inode. + */ + +static int drm_fs_cnt; +static struct vfsmount *drm_fs_mnt; + +static int drm_fs_init_fs_context(struct fs_context *fc) +{ + return init_pseudo(fc, 0x010203ff) ? 
0 : -ENOMEM; +} + +static struct file_system_type drm_fs_type = { + .name = "drm", + .owner = THIS_MODULE, + .init_fs_context = drm_fs_init_fs_context, + .kill_sb = kill_anon_super, +}; + +static struct inode *drm_fs_inode_new(void) +{ + struct inode *inode; + int r; + + r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt); + if (r < 0) { + DRM_ERROR("Cannot mount pseudo fs: %d\n", r); + return ERR_PTR(r); + } + + inode = alloc_anon_inode(drm_fs_mnt->mnt_sb); + if (IS_ERR(inode)) + simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); + + return inode; +} + +static void drm_fs_inode_free(struct inode *inode) +{ + if (inode) { + iput(inode); + simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); + } +} +#elif defined(__FreeBSD__) +static struct inode *drm_fs_inode_new(void) +{ + return (NULL); +} +static void drm_fs_inode_free(struct inode *inode) +{ +} +#endif + +/** + * DOC: component helper usage recommendations + * + * DRM drivers that drive hardware where a logical device consists of a pile of + * independent hardware blocks are recommended to use the :ref:`component helper + * library <component>`. For consistency and better options for code reuse the + * following guidelines apply: + * + * - The entire device initialization procedure should be run from the + * &component_master_ops.master_bind callback, starting with drm_dev_init(), + * then binding all components with component_bind_all() and finishing with + * drm_dev_register(). + * + * - The opaque pointer passed to all components through component_bind_all() + * should point at &struct drm_device of the device instance, not some driver + * specific private structure. + * + * - The component helper fills the niche where further standardization of + * interfaces is not practical. When there already is, or will be, a + * standardized interface like &drm_bridge or &drm_panel, providing its own + * functions to find such components at driver load time, like + * drm_of_find_panel_or_bridge(), then the component helper should not be + * used. + */ + +/** + * drm_dev_init - Initialise new DRM device + * @dev: DRM device + * @driver: DRM driver + * @parent: Parent device object + * + * Initialize a new DRM device. No device registration is done. + * Call drm_dev_register() to advertise the device to user space and register it + * with other core subsystems. This should be done last in the device + * initialization sequence to make sure userspace can't access an inconsistent + * state. + * + * The initial ref-count of the object is 1. Use drm_dev_get() and + * drm_dev_put() to take and drop further ref-counts. + * + * It is recommended that drivers embed &struct drm_device into their own device + * structure. + * + * Drivers that do not want to allocate their own device struct + * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers + * that do embed &struct drm_device it must be placed first in the overall + * structure, and the overall structure must be allocated using kmalloc(): The + * drm core's release function unconditionally calls kfree() on the @dev pointer + * when the final reference is released. To override this behaviour, and so + * allow embedding of the drm_device inside the driver's device struct at an + * arbitrary offset, you must supply a &drm_driver.release callback and control + * the finalization explicitly. + * + * RETURNS: + * 0 on success, or error code on failure.
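+ * + * A minimal sketch of the kmalloc()-plus-first-member rule spelled out above + * (struct driver_device and driver_drm_driver are hypothetical): + * + * .. code-block:: c + * + * struct driver_device { + * struct drm_device drm; // must stay first for the default kfree() + * int state; + * }; + * + * priv = kzalloc(sizeof(*priv), GFP_KERNEL); + * if (!priv) + * return -ENOMEM; + * ret = drm_dev_init(&priv->drm, &driver_drm_driver, parent);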
+ */ +int drm_dev_init(struct drm_device *dev, + struct drm_driver *driver, + struct device *parent) +{ + int ret; + + if (!drm_core_init_complete) { + DRM_ERROR("DRM core is not initialized\n"); + return -ENODEV; + } + + BUG_ON(!parent); + + kref_init(&dev->ref); + dev->dev = get_device(parent); + dev->driver = driver; + + /* no per-device feature limits by default */ + dev->driver_features = ~0u; + + drm_legacy_init_members(dev); + INIT_LIST_HEAD(&dev->filelist); + INIT_LIST_HEAD(&dev->filelist_internal); + INIT_LIST_HEAD(&dev->clientlist); + INIT_LIST_HEAD(&dev->vblank_event_list); + + spin_lock_init(&dev->event_lock); + mutex_init(&dev->struct_mutex); + mutex_init(&dev->filelist_mutex); + mutex_init(&dev->clientlist_mutex); + mutex_init(&dev->master_mutex); + + dev->anon_inode = drm_fs_inode_new(); + if (IS_ERR(dev->anon_inode)) { + ret = PTR_ERR(dev->anon_inode); + DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); + goto err_free; + } + + if (drm_core_check_feature(dev, DRIVER_RENDER)) { + ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); + if (ret) + goto err_minors; + } + + ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY); + if (ret) + goto err_minors; + + ret = drm_legacy_create_map_hash(dev); + if (ret) + goto err_minors; + + drm_legacy_ctxbitmap_init(dev); + + if (drm_core_check_feature(dev, DRIVER_GEM)) { + ret = drm_gem_init(dev); + if (ret) { + DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); + goto err_ctxbitmap; + } + } + + ret = drm_dev_set_unique(dev, dev_name(parent)); + if (ret) + goto err_setunique; + + return 0; + +err_setunique: + if (drm_core_check_feature(dev, DRIVER_GEM)) + drm_gem_destroy(dev); +err_ctxbitmap: + drm_legacy_ctxbitmap_cleanup(dev); + drm_legacy_remove_map_hash(dev); +err_minors: + drm_minor_free(dev, DRM_MINOR_PRIMARY); + drm_minor_free(dev, DRM_MINOR_RENDER); + drm_fs_inode_free(dev->anon_inode); +err_free: + put_device(dev->dev); + mutex_destroy(&dev->master_mutex); + mutex_destroy(&dev->clientlist_mutex); + mutex_destroy(&dev->filelist_mutex); + mutex_destroy(&dev->struct_mutex); + drm_legacy_destroy_members(dev); + return ret; +} +EXPORT_SYMBOL(drm_dev_init); + +static void devm_drm_dev_init_release(void *data) +{ + drm_dev_put(data); +} + +/** + * devm_drm_dev_init - Resource managed drm_dev_init() + * @parent: Parent device object + * @dev: DRM device + * @driver: DRM driver + * + * Managed drm_dev_init(). The DRM device initialized with this function is + * automatically put on driver detach using drm_dev_put(). You must supply a + * &drm_driver.release callback to control the finalization explicitly. + * + * RETURNS: + * 0 on success, or error code on failure. + */ +int devm_drm_dev_init(struct device *parent, + struct drm_device *dev, + struct drm_driver *driver) +{ + int ret; + + if (WARN_ON(!parent || !driver->release)) + return -EINVAL; + + ret = drm_dev_init(dev, driver, parent); + if (ret) + return ret; + + ret = devm_add_action(parent, devm_drm_dev_init_release, dev); + if (ret) + devm_drm_dev_init_release(dev); + + return ret; +} +EXPORT_SYMBOL(devm_drm_dev_init); + +/** + * drm_dev_fini - Finalize a dead DRM device + * @dev: DRM device + * + * Finalize a dead DRM device. This is the converse to drm_dev_init() and + * frees up all data allocated by it. All driver private data should be + * finalized first. Note that this function does not free the @dev, that is + * left to the caller. + * + * The ref-count of @dev must be zero, and drm_dev_fini() should only be called + * from a &drm_driver.release callback. 
+ */ +void drm_dev_fini(struct drm_device *dev) +{ + drm_vblank_cleanup(dev); + + if (drm_core_check_feature(dev, DRIVER_GEM)) + drm_gem_destroy(dev); + + drm_legacy_ctxbitmap_cleanup(dev); + drm_legacy_remove_map_hash(dev); + drm_fs_inode_free(dev->anon_inode); + + drm_minor_free(dev, DRM_MINOR_PRIMARY); + drm_minor_free(dev, DRM_MINOR_RENDER); + + put_device(dev->dev); + + mutex_destroy(&dev->master_mutex); + mutex_destroy(&dev->clientlist_mutex); + mutex_destroy(&dev->filelist_mutex); + mutex_destroy(&dev->struct_mutex); + drm_legacy_destroy_members(dev); + kfree(dev->unique); +} +EXPORT_SYMBOL(drm_dev_fini); + +/** + * drm_dev_alloc - Allocate new DRM device + * @driver: DRM driver to allocate device for + * @parent: Parent device object + * + * Allocate and initialize a new DRM device. No device registration is done. + * Call drm_dev_register() to advertise the device to user space and register it + * with other core subsystems. This should be done last in the device + * initialization sequence to make sure userspace can't access an inconsistent + * state. + * + * The initial ref-count of the object is 1. Use drm_dev_get() and + * drm_dev_put() to take and drop further ref-counts. + * + * Note that for purely virtual devices @parent can be NULL. + * + * Drivers that wish to subclass or embed &struct drm_device into their + * own struct should look at using drm_dev_init() instead. + * + * RETURNS: + * Pointer to new DRM device, or ERR_PTR on failure. + */ +struct drm_device *drm_dev_alloc(struct drm_driver *driver, + struct device *parent) +{ + struct drm_device *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + ret = drm_dev_init(dev, driver, parent); + if (ret) { + kfree(dev); + return ERR_PTR(ret); + } + + return dev; +} +EXPORT_SYMBOL(drm_dev_alloc); + +static void drm_dev_release(struct kref *ref) +{ + struct drm_device *dev = container_of(ref, struct drm_device, ref); + + if (dev->driver->release) { + dev->driver->release(dev); + } else { + drm_dev_fini(dev); + kfree(dev); + } +} + +/** + * drm_dev_get - Take reference of a DRM device + * @dev: device to take reference of or NULL + * + * This increases the ref-count of @dev by one. You *must* already own a + * reference when calling this. Use drm_dev_put() to drop this reference + * again. + * + * This function never fails. However, this function does not provide *any* + * guarantee whether the device is alive or running. It only provides a + * reference to the object and the memory associated with it. + */ +void drm_dev_get(struct drm_device *dev) +{ + if (dev) + kref_get(&dev->ref); +} +EXPORT_SYMBOL(drm_dev_get); + +/** + * drm_dev_put - Drop reference of a DRM device + * @dev: device to drop reference of or NULL + * + * This decreases the ref-count of @dev by one. The device is destroyed if the + * ref-count drops to zero. + */ +void drm_dev_put(struct drm_device *dev) +{ + if (dev) + kref_put(&dev->ref, drm_dev_release); +} +EXPORT_SYMBOL(drm_dev_put); + +static int create_compat_control_link(struct drm_device *dev) +{ + struct drm_minor *minor; + char *name; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return 0; + + minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY); + if (!minor) + return 0; + + /* + * Some existing userspace out there uses the existence of the controlD* + * sysfs files to figure out whether it's a modeset driver. It only does + * readdir, hence a symlink is sufficient (and the least confusing + * option).
Otherwise controlD* is entirely unused. + * + * Old controlD chardevs have been allocated in the range + * 64-127. + */ + name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64); + if (!name) + return -ENOMEM; + +#ifdef __linux__ + ret = sysfs_create_link(minor->kdev->kobj.parent, + &minor->kdev->kobj, + name); +#else + ret = 0; +#endif + + kfree(name); + + return ret; +} + +static void remove_compat_control_link(struct drm_device *dev) +{ + struct drm_minor *minor; + char *name; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY); + if (!minor) + return; + + name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64); + if (!name) + return; + +#ifdef __linux__ + sysfs_remove_link(minor->kdev->kobj.parent, name); +#endif + + kfree(name); +} + +/** + * drm_dev_register - Register DRM device + * @dev: Device to register + * @flags: Flags passed to the driver's .load() function + * + * Register the DRM device @dev with the system, advertise the device to + * user-space and start normal device operation. @dev must be initialized via + * drm_dev_init() previously. + * + * Never call this twice on any device! + * + * NOTE: To ensure backward compatibility with existing drivers, this function + * calls the &drm_driver.load method after registering the device + * nodes, creating race conditions. Usage of the &drm_driver.load method is + * therefore deprecated; drivers must perform all initialization before calling + * drm_dev_register(). + * + * RETURNS: + * 0 on success, negative error code on failure. + */ +int drm_dev_register(struct drm_device *dev, unsigned long flags) +{ + struct drm_driver *driver = dev->driver; + int ret; + + mutex_lock(&drm_global_mutex); + + ret = drm_minor_register(dev, DRM_MINOR_RENDER); + if (ret) + goto err_minors; + + ret = drm_minor_register(dev, DRM_MINOR_PRIMARY); + if (ret) + goto err_minors; + + ret = create_compat_control_link(dev); + if (ret) + goto err_minors; + + if (dev->driver->load) { + ret = dev->driver->load(dev, flags); + if (ret) + goto err_minors; + } + + dev->registered = true; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_modeset_register_all(dev); + + ret = 0; + + DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", + driver->name, driver->major, driver->minor, + driver->patchlevel, driver->date, + dev->dev ? dev_name(dev->dev) : "virtual device", + dev->primary->index); + + goto out_unlock; + +err_minors: + remove_compat_control_link(dev); + drm_minor_unregister(dev, DRM_MINOR_PRIMARY); + drm_minor_unregister(dev, DRM_MINOR_RENDER); +out_unlock: + mutex_unlock(&drm_global_mutex); + return ret; +} +EXPORT_SYMBOL(drm_dev_register); + +/** + * drm_dev_unregister - Unregister DRM device + * @dev: Device to unregister + * + * Unregister the DRM device from the system. This does the reverse of + * drm_dev_register() but does not deallocate the device. The caller must call + * drm_dev_put() to drop their final reference. + * + * A special form of unregistering for hotpluggable devices is drm_dev_unplug(), + * which can be called while there are still open users of @dev. + * + * This should be called first in the device teardown code to make sure + * userspace can't access the device instance any more.
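+ * + * For the drm_dev_unplug() case, sections touching device resources are + * typically wrapped as sketched here (hw_read() is a hypothetical register + * accessor): + * + * .. code-block:: c + * + * int idx; + * + * if (!drm_dev_enter(drm, &idx)) + * return -ENODEV; // device is gone + * val = hw_read(drm, reg); + * drm_dev_exit(idx);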
+ */ +void drm_dev_unregister(struct drm_device *dev) +{ + if (drm_core_check_feature(dev, DRIVER_LEGACY)) + drm_lastclose(dev); + + dev->registered = false; + + drm_client_dev_unregister(dev); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_modeset_unregister_all(dev); + + if (dev->driver->unload) + dev->driver->unload(dev); + + if (dev->agp) + drm_pci_agp_destroy(dev); + + drm_legacy_rmmaps(dev); + + remove_compat_control_link(dev); + drm_minor_unregister(dev, DRM_MINOR_PRIMARY); + drm_minor_unregister(dev, DRM_MINOR_RENDER); +} +EXPORT_SYMBOL(drm_dev_unregister); + +/** + * drm_dev_set_unique - Set the unique name of a DRM device + * @dev: device of which to set the unique name + * @name: unique name + * + * Sets the unique name of a DRM device using the specified string. This is + * already done by drm_dev_init(), drivers should only override the default + * unique name for backwards compatibility reasons. + * + * Return: 0 on success or a negative error code on failure. + */ +int drm_dev_set_unique(struct drm_device *dev, const char *name) +{ + kfree(dev->unique); + dev->unique = kstrdup(name, GFP_KERNEL); + + return dev->unique ? 0 : -ENOMEM; +} +EXPORT_SYMBOL(drm_dev_set_unique); + +#ifdef __linux__ +/* + * DRM Core + * The DRM core module initializes all global DRM objects and makes them + * available to drivers. Once setup, drivers can probe their respective + * devices. + * Currently, core management includes: + * - The "DRM-Global" key/value database + * - Global ID management for connectors + * - DRM major number allocation + * - DRM minor management + * - DRM sysfs class + * - DRM debugfs root + * + * Furthermore, the DRM core provides dynamic char-dev lookups. For each + * interface registered on a DRM device, you can request minor numbers from DRM + * core. DRM core takes care of major-number management and char-dev + * registration. A stub ->open() callback forwards any open() requests to the + * registered minor. 
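+ * + * In this port the equivalent char-dev plumbing is handled by + * drm_fbsd_cdev_create()/drm_fbsd_cdev_delete() from drm_minor_register() and + * drm_minor_unregister(), so the stub-open path below is Linux-only.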
+ */ + +static int drm_stub_open(struct inode *inode, struct file *filp) +{ + const struct file_operations *new_fops; + struct drm_minor *minor; + int err; + + DRM_DEBUG("\n"); + + mutex_lock(&drm_global_mutex); + minor = drm_minor_acquire(iminor(inode)); + if (IS_ERR(minor)) { + err = PTR_ERR(minor); + goto out_unlock; + } + + new_fops = fops_get(minor->dev->driver->fops); + if (!new_fops) { + err = -ENODEV; + goto out_release; + } + + replace_fops(filp, new_fops); + if (filp->f_op->open) + err = filp->f_op->open(inode, filp); + else + err = 0; + +out_release: + drm_minor_release(minor); +out_unlock: + mutex_unlock(&drm_global_mutex); + return err; +} + +static const struct file_operations drm_stub_fops = { + .owner = THIS_MODULE, + .open = drm_stub_open, + .llseek = noop_llseek, +}; +#endif + +static void drm_core_exit(void) +{ +#ifdef __linux__ + unregister_chrdev(DRM_MAJOR, "drm"); + debugfs_remove(drm_debugfs_root); + drm_sysfs_destroy(); +#endif + idr_destroy(&drm_minors_idr); + drm_connector_ida_destroy(); +} + +static int __init drm_core_init(void) +{ + int ret; + + drm_connector_ida_init(); + idr_init(&drm_minors_idr); + +#ifdef __linux__ + ret = drm_sysfs_init(); + if (ret < 0) { + DRM_ERROR("Cannot create DRM class: %d\n", ret); + goto error; + } + + drm_debugfs_root = debugfs_create_dir("dri", NULL); + + ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops); + if (ret < 0) + goto error; +#endif + + drm_core_init_complete = true; + + DRM_DEBUG("Initialized\n"); + return 0; + +#ifdef __linux__ +error: +#endif + drm_core_exit(); + return ret; +} + +module_init(drm_core_init); +module_exit(drm_core_exit); Index: sys/dev/drm/core/drm_dumb_buffers.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_dumb_buffers.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2006-2008 Intel Corporation + * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> + * Copyright (c) 2008 Red Hat Inc. + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem.h> +#include <drm/drm_mode.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * The KMS API doesn't standardize backing storage object creation and leaves it + * to driver-specific ioctls.
Furthermore, actually creating a buffer object even + * for GEM-based drivers is done through a driver-specific ioctl - GEM only has + * a common userspace interface for sharing and destroying objects. While not an + * issue for full-fledged graphics stacks that include device-specific userspace + * components (in libdrm for instance), this limit makes DRM-based early boot + * graphics unnecessarily complex. + * + * Dumb objects partly alleviate the problem by providing a standard API to + * create dumb buffers suitable for scanout, which can then be used to create + * KMS frame buffers. + * + * To support dumb objects, drivers must implement the &drm_driver.dumb_create + * operation. &drm_driver.dumb_destroy defaults to drm_gem_dumb_destroy() if + * not set and &drm_driver.dumb_map_offset defaults to + * drm_gem_dumb_map_offset(). See the callbacks for further details. + * + * Note that dumb objects may not be used for GPU acceleration, as has been + * attempted on some ARM embedded platforms. Such drivers really must have + * a hardware-specific ioctl to allocate suitable buffer objects. + */ + +int drm_mode_create_dumb(struct drm_device *dev, + struct drm_mode_create_dumb *args, + struct drm_file *file_priv) +{ + u32 cpp, stride, size; + + if (!dev->driver->dumb_create) + return -ENOSYS; + if (!args->width || !args->height || !args->bpp) + return -EINVAL; + + /* overflow checks for 32bit size calculations */ + if (args->bpp > U32_MAX - 8) + return -EINVAL; + cpp = DIV_ROUND_UP(args->bpp, 8); + if (cpp > U32_MAX / args->width) + return -EINVAL; + stride = cpp * args->width; + if (args->height > U32_MAX / stride) + return -EINVAL; + + /* test for wrap-around */ + size = args->height * stride; + if (PAGE_ALIGN(size) == 0) + return -EINVAL; + + /* + * handle, pitch and size are output parameters. Zero them out to + * prevent drivers from accidentally using uninitialized data. Since + * not all existing userspace is clearing these fields properly we + * cannot reject IOCTLs with garbage in them. + */ + args->handle = 0; + args->pitch = 0; + args->size = 0; + + return dev->driver->dumb_create(file_priv, dev, args); +} + +int drm_mode_create_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + return drm_mode_create_dumb(dev, data, file_priv); +} + +/** + * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * Allocate an offset in the drm device node's address space to be able to + * memory map a dumb buffer. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure.
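+ * + * A sketch of the matching userspace sequence (fd is an open DRM device node; + * error handling omitted): + * + * .. code-block:: c + * + * struct drm_mode_create_dumb creq = { .height = 768, .width = 1024, .bpp = 32 }; + * struct drm_mode_map_dumb mreq = { 0 }; + * + * ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq); + * mreq.handle = creq.handle; + * ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq); + * map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mreq.offset);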
+ */ +int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_map_dumb *args = data; + + if (!dev->driver->dumb_create) + return -ENOSYS; + + if (dev->driver->dumb_map_offset) + return dev->driver->dumb_map_offset(file_priv, dev, + args->handle, + &args->offset); + else + return drm_gem_dumb_map_offset(file_priv, dev, args->handle, + &args->offset); +} + +int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle, + struct drm_file *file_priv) +{ + if (!dev->driver->dumb_create) + return -ENOSYS; + + if (dev->driver->dumb_destroy) + return dev->driver->dumb_destroy(file_priv, dev, handle); + else + return drm_gem_dumb_destroy(file_priv, dev, handle); +} + +int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_destroy_dumb *args = data; + + return drm_mode_destroy_dumb(dev, args->handle, file_priv); +} Index: sys/dev/drm/core/drm_edid.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_edid.c @@ -0,0 +1,5499 @@ +/* + * Copyright (c) 2006 Luc Verhaegen (quirks list) + * Copyright (c) 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * Copyright 2010 Red Hat, Inc. + * + * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from + * FB layer. + * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/vga_switcheroo.h> + +#include <drm/drm_displayid.h> +#include <drm/drm_drv.h> +#include <drm/drm_edid.h> +#include <drm/drm_encoder.h> +#include <drm/drm_print.h> +#include <drm/drm_scdc_helper.h> + +#include "drm_crtc_internal.h" + +#define version_greater(edid, maj, min) \ + (((edid)->version > (maj)) || \ + ((edid)->version == (maj) && (edid)->revision > (min))) + +#define EDID_EST_TIMINGS 16 +#define EDID_STD_TIMINGS 8 +#define EDID_DETAILED_TIMINGS 4 + +/* + * EDID blocks out in the wild have a variety of bugs, try to collect + * them here (note that userspace may work around broken monitors first, + * but fixes should make their way here so that the kernel "just works" + * on as many displays as possible).
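+ * + * Entries below are keyed on the 3-letter PNP vendor ID plus the 16-bit + * product code from the EDID base block, matching struct edid_quirk.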
+ */ + +/* First detailed mode wrong, use largest 60Hz mode */ +#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0) +/* Reported 135MHz pixel clock is too high, needs adjustment */ +#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1) +/* Prefer the largest mode at 75 Hz */ +#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2) +/* Detail timing is in cm not mm */ +#define EDID_QUIRK_DETAILED_IN_CM (1 << 3) +/* Detailed timing descriptors have bogus size values, so just take the + * maximum size and use that. + */ +#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4) +/* use +hsync +vsync for detailed mode */ +#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) +/* Force reduced-blanking timings for detailed modes */ +#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) +/* Force 8bpc */ +#define EDID_QUIRK_FORCE_8BPC (1 << 8) +/* Force 12bpc */ +#define EDID_QUIRK_FORCE_12BPC (1 << 9) +/* Force 6bpc */ +#define EDID_QUIRK_FORCE_6BPC (1 << 10) +/* Force 10bpc */ +#define EDID_QUIRK_FORCE_10BPC (1 << 11) +/* Non desktop display (i.e. HMD) */ +#define EDID_QUIRK_NON_DESKTOP (1 << 12) + +struct detailed_mode_closure { + struct drm_connector *connector; + struct edid *edid; + bool preferred; + u32 quirks; + int modes; +}; + +#define LEVEL_DMT 0 +#define LEVEL_GTF 1 +#define LEVEL_GTF2 2 +#define LEVEL_CVT 3 + +static const struct edid_quirk { + char vendor[4]; + int product_id; + u32 quirks; +} edid_quirk_list[] = { + /* Acer AL1706 */ + { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, + /* Acer F51 */ + { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 }, + + /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ + { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, + + /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */ + { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC }, + + /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ + { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, + + /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */ + { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC }, + + /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */ + { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC }, + + /* Belinea 10 15 55 */ + { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, + { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, + + /* Envision Peripherals, Inc. EN-7100e */ + { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, + /* Envision EN2028 */ + { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, + + /* Funai Electronics PM36B */ + { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | + EDID_QUIRK_DETAILED_IN_CM }, + + /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */ + { "LGD", 764, EDID_QUIRK_FORCE_10BPC }, + + /* LG Philips LCD LP154W01-A5 */ + { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, + { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, + + /* Samsung SyncMaster 205BW. 
Note: irony */ + { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP }, + /* Samsung SyncMaster 22[5-6]BW */ + { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, + { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, + + /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */ + { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC }, + + /* ViewSonic VA2026w */ + { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, + + /* Medion MD 30217 PG */ + { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, + + /* Lenovo G50 */ + { "SDC", 18514, EDID_QUIRK_FORCE_6BPC }, + + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, + + /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/ + { "ETR", 13896, EDID_QUIRK_FORCE_8BPC }, + + /* Valve Index Headset */ + { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP }, + + /* HTC Vive and Vive Pro VR Headsets */ + { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, + { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP }, + + /* Oculus Rift DK1, DK2, and CV1 VR Headsets */ + { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, + { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP }, + { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP }, + + /* Windows Mixed Reality Headsets */ + { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP }, + { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP }, + { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP }, + { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP }, + { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP }, + { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP }, + { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP }, + { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP }, + + /* Sony PlayStation VR Headset */ + { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP }, + + /* Sensics VR Headsets */ + { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP }, + + /* OSVR HDK and HDK2 VR Headsets */ + { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP }, +}; + +/* + * Autogenerated from the DMT spec. + * This table is copied from xfree86/modes/xf86EdidModes.c. 
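+ * + * As a reading aid for the tables below, the DRM_MODE() initializer arguments + * are, in order: name, type, clock (kHz), hdisplay, hsync_start, hsync_end, + * htotal, hskew, vdisplay, vsync_start, vsync_end, vtotal, vscan, flags.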
+ */ +static const struct drm_display_mode drm_dmt_modes[] = { + /* 0x01 - 640x350@85Hz */ + { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, + 736, 832, 0, 350, 382, 385, 445, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x02 - 640x400@85Hz */ + { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, + 736, 832, 0, 400, 401, 404, 445, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x03 - 720x400@85Hz */ + { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756, + 828, 936, 0, 400, 401, 404, 446, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x04 - 640x480@60Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, + 752, 800, 0, 480, 490, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x05 - 640x480@72Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, + 704, 832, 0, 480, 489, 492, 520, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x06 - 640x480@75Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, + 720, 840, 0, 480, 481, 484, 500, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x07 - 640x480@85Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696, + 752, 832, 0, 480, 481, 484, 509, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x08 - 800x600@56Hz */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, + 896, 1024, 0, 600, 601, 603, 625, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x09 - 800x600@60Hz */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, + 968, 1056, 0, 600, 601, 605, 628, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x0a - 800x600@72Hz */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, + 976, 1040, 0, 600, 637, 643, 666, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x0b - 800x600@75Hz */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, + 896, 1056, 0, 600, 601, 604, 625, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x0c - 800x600@85Hz */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832, + 896, 1048, 0, 600, 601, 604, 631, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x0d - 800x600@120Hz RB */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848, + 880, 960, 0, 600, 603, 607, 636, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x0e - 848x480@60Hz */ + { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864, + 976, 1088, 0, 480, 486, 494, 517, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x0f - 1024x768@43Hz, interlace */ + { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, + 1208, 1264, 0, 768, 768, 776, 817, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE) }, + /* 0x10 - 1024x768@60Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, + 1184, 1344, 0, 768, 771, 777, 806, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x11 - 1024x768@70Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, + 1184, 1328, 0, 768, 771, 777, 806, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x12 - 1024x768@75Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040, + 1136, 1312, 0, 768, 769, 772, 800, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x13 - 1024x768@85Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, + 1168, 1376, 0, 768, 769, 772, 808, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x14 
- 1024x768@120Hz RB */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072, + 1104, 1184, 0, 768, 771, 775, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x15 - 1152x864@75Hz */ + { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, + 1344, 1600, 0, 864, 865, 868, 900, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x55 - 1280x720@60Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x16 - 1280x768@60Hz RB */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328, + 1360, 1440, 0, 768, 771, 778, 790, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x17 - 1280x768@60Hz */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, + 1472, 1664, 0, 768, 771, 778, 798, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x18 - 1280x768@75Hz */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360, + 1488, 1696, 0, 768, 771, 778, 805, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x19 - 1280x768@85Hz */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360, + 1496, 1712, 0, 768, 771, 778, 809, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x1a - 1280x768@120Hz RB */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328, + 1360, 1440, 0, 768, 771, 778, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x1b - 1280x800@60Hz RB */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328, + 1360, 1440, 0, 800, 803, 809, 823, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x1c - 1280x800@60Hz */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, + 1480, 1680, 0, 800, 803, 809, 831, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x1d - 1280x800@75Hz */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360, + 1488, 1696, 0, 800, 803, 809, 838, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x1e - 1280x800@85Hz */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360, + 1496, 1712, 0, 800, 803, 809, 843, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x1f - 1280x800@120Hz RB */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328, + 1360, 1440, 0, 800, 803, 809, 847, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x20 - 1280x960@60Hz */ + { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, + 1488, 1800, 0, 960, 961, 964, 1000, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x21 - 1280x960@85Hz */ + { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344, + 1504, 1728, 0, 960, 961, 964, 1011, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x22 - 1280x960@120Hz RB */ + { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328, + 1360, 1440, 0, 960, 963, 967, 1017, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x23 - 1280x1024@60Hz */ + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, + 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x24 - 1280x1024@75Hz */ + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, + 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x25 - 1280x1024@85Hz */ + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344, + 1504, 1728, 0, 1024, 1025, 1028, 1072, 0, + DRM_MODE_FLAG_PHSYNC | 
DRM_MODE_FLAG_PVSYNC) }, + /* 0x26 - 1280x1024@120Hz RB */ + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328, + 1360, 1440, 0, 1024, 1027, 1034, 1084, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x27 - 1360x768@60Hz */ + { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, + 1536, 1792, 0, 768, 771, 777, 795, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x28 - 1360x768@120Hz RB */ + { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408, + 1440, 1520, 0, 768, 771, 776, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x51 - 1366x768@60Hz */ + { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 85500, 1366, 1436, + 1579, 1792, 0, 768, 771, 774, 798, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x56 - 1366x768@60Hz */ + { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 72000, 1366, 1380, + 1436, 1500, 0, 768, 769, 772, 800, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x29 - 1400x1050@60Hz RB */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448, + 1480, 1560, 0, 1050, 1053, 1057, 1080, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x2a - 1400x1050@60Hz */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, + 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x2b - 1400x1050@75Hz */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504, + 1648, 1896, 0, 1050, 1053, 1057, 1099, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x2c - 1400x1050@85Hz */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, + 1656, 1912, 0, 1050, 1053, 1057, 1105, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x2d - 1400x1050@120Hz RB */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448, + 1480, 1560, 0, 1050, 1053, 1057, 1112, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x2e - 1440x900@60Hz RB */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488, + 1520, 1600, 0, 900, 903, 909, 926, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x2f - 1440x900@60Hz */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, + 1672, 1904, 0, 900, 903, 909, 934, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x30 - 1440x900@75Hz */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536, + 1688, 1936, 0, 900, 903, 909, 942, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x31 - 1440x900@85Hz */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544, + 1696, 1952, 0, 900, 903, 909, 948, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x32 - 1440x900@120Hz RB */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488, + 1520, 1600, 0, 900, 903, 909, 953, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x53 - 1600x900@60Hz */ + { DRM_MODE("1600x900", DRM_MODE_TYPE_DRIVER, 108000, 1600, 1624, + 1704, 1800, 0, 900, 901, 904, 1000, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x33 - 1600x1200@60Hz */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, + 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x34 - 1600x1200@65Hz */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664, + 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x35 - 1600x1200@70Hz */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664, + 1856, 2160, 0, 
1200, 1201, 1204, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x36 - 1600x1200@75Hz */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664, + 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x37 - 1600x1200@85Hz */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664, + 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x38 - 1600x1200@120Hz RB */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648, + 1680, 1760, 0, 1200, 1203, 1207, 1271, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x39 - 1680x1050@60Hz RB */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728, + 1760, 1840, 0, 1050, 1053, 1059, 1080, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x3a - 1680x1050@60Hz */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, + 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x3b - 1680x1050@75Hz */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800, + 1976, 2272, 0, 1050, 1053, 1059, 1099, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x3c - 1680x1050@85Hz */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808, + 1984, 2288, 0, 1050, 1053, 1059, 1105, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x3d - 1680x1050@120Hz RB */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728, + 1760, 1840, 0, 1050, 1053, 1059, 1112, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x3e - 1792x1344@60Hz */ + { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, + 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x3f - 1792x1344@75Hz */ + { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, + 2104, 2456, 0, 1344, 1345, 1348, 1417, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x40 - 1792x1344@120Hz RB */ + { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840, + 1872, 1952, 0, 1344, 1347, 1351, 1423, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x41 - 1856x1392@60Hz */ + { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, + 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x42 - 1856x1392@75Hz */ + { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984, + 2208, 2560, 0, 1392, 1393, 1396, 1500, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x43 - 1856x1392@120Hz RB */ + { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904, + 1936, 2016, 0, 1392, 1395, 1399, 1474, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x52 - 1920x1080@60Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x44 - 1920x1200@60Hz RB */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968, + 2000, 2080, 0, 1200, 1203, 1209, 1235, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x45 - 1920x1200@60Hz */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, + 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x46 - 1920x1200@75Hz */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056, + 2264, 2608, 0, 1200, 1203, 1209, 1255, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 
+ /* 0x47 - 1920x1200@85Hz */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064, + 2272, 2624, 0, 1200, 1203, 1209, 1262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x48 - 1920x1200@120Hz RB */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968, + 2000, 2080, 0, 1200, 1203, 1209, 1271, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x49 - 1920x1440@60Hz */ + { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, + 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x4a - 1920x1440@75Hz */ + { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064, + 2288, 2640, 0, 1440, 1441, 1444, 1500, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x4b - 1920x1440@120Hz RB */ + { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968, + 2000, 2080, 0, 1440, 1443, 1447, 1525, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x54 - 2048x1152@60Hz */ + { DRM_MODE("2048x1152", DRM_MODE_TYPE_DRIVER, 162000, 2048, 2074, + 2154, 2250, 0, 1152, 1153, 1156, 1200, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x4c - 2560x1600@60Hz RB */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608, + 2640, 2720, 0, 1600, 1603, 1609, 1646, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x4d - 2560x1600@60Hz */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, + 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x4e - 2560x1600@75Hz */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768, + 3048, 3536, 0, 1600, 1603, 1609, 1672, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x4f - 2560x1600@85Hz */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768, + 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 0x50 - 2560x1600@120Hz RB */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608, + 2640, 2720, 0, 1600, 1603, 1609, 1694, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x57 - 4096x2160@60Hz RB */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 556744, 4096, 4104, + 4136, 4176, 0, 2160, 2208, 2216, 2222, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 0x58 - 4096x2160@59.94Hz RB */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 556188, 4096, 4104, + 4136, 4176, 0, 2160, 2208, 2216, 2222, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, +}; + +/* + * These more or less come from the DMT spec. The 720x400 modes are + * inferred from historical 80x25 practice. The 640x480@67 and 832x624@75 + * modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode + * should be 1152x870, again for the Mac, but instead we use the x864 DMT + * mode. + * + * The DMT modes have been fact-checked; the rest are mild guesses. 
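+ *
+ * For orientation (an illustrative decode, not the exact consumer of
+ * this table): the base EDID advertises these in a three-byte
+ * established-timings bitmap, and bit i of
+ *
+ *	est = t1 | (t2 << 8) | ((mfg_rsvd & 0x80) << 9);
+ *
+ * selects edid_est_modes[i] below, with t1/t2/mfg_rsvd being the three
+ * bitmap bytes.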
+ */ +static const struct drm_display_mode edid_est_modes[] = { + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, + 968, 1056, 0, 600, 601, 605, 628, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, + 896, 1024, 0, 600, 601, 603, 625, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, + 720, 840, 0, 480, 481, 484, 500, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, + 704, 832, 0, 480, 489, 492, 520, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, + 768, 864, 0, 480, 483, 486, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, + 752, 800, 0, 480, 490, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ + { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, + 846, 900, 0, 400, 421, 423, 449, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */ + { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738, + 846, 900, 0, 400, 412, 414, 449, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */ + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, + 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040, + 1136, 1312, 0, 768, 769, 772, 800, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, + 1184, 1328, 0, 768, 771, 777, 806, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, + 1184, 1344, 0, 768, 771, 777, 806, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ + { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, + 1208, 1264, 0, 768, 768, 776, 817, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ + { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, + 928, 1152, 0, 624, 625, 628, 667, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, + 896, 1056, 0, 600, 601, 604, 625, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, + 976, 1040, 0, 600, 637, 643, 666, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ + { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, + 1344, 1600, 0, 864, 865, 868, 900, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ +}; + +struct minimode { + short w; + short h; + short r; + short rb; +}; + +static const struct minimode est3_modes[] = { + /* byte 6 */ + { 640, 350, 85, 0 }, + { 640, 400, 85, 0 }, + { 720, 400, 85, 0 }, + { 640, 480, 85, 0 }, + { 848, 480, 60, 0 }, + { 800, 600, 85, 0 }, + { 1024, 768, 85, 0 }, + { 1152, 864, 75, 0 }, + /* byte 7 */ + { 1280, 768, 60, 1 }, + { 1280, 768, 60, 0 }, + { 1280, 768, 75, 0 }, + { 1280, 768, 85, 0 }, + { 1280, 960, 60, 0 }, + { 1280, 960, 85, 0 }, + { 1280, 1024, 60, 0 }, + { 1280, 1024, 
85, 0 }, + /* byte 8 */ + { 1360, 768, 60, 0 }, + { 1440, 900, 60, 1 }, + { 1440, 900, 60, 0 }, + { 1440, 900, 75, 0 }, + { 1440, 900, 85, 0 }, + { 1400, 1050, 60, 1 }, + { 1400, 1050, 60, 0 }, + { 1400, 1050, 75, 0 }, + /* byte 9 */ + { 1400, 1050, 85, 0 }, + { 1680, 1050, 60, 1 }, + { 1680, 1050, 60, 0 }, + { 1680, 1050, 75, 0 }, + { 1680, 1050, 85, 0 }, + { 1600, 1200, 60, 0 }, + { 1600, 1200, 65, 0 }, + { 1600, 1200, 70, 0 }, + /* byte 10 */ + { 1600, 1200, 75, 0 }, + { 1600, 1200, 85, 0 }, + { 1792, 1344, 60, 0 }, + { 1792, 1344, 75, 0 }, + { 1856, 1392, 60, 0 }, + { 1856, 1392, 75, 0 }, + { 1920, 1200, 60, 1 }, + { 1920, 1200, 60, 0 }, + /* byte 11 */ + { 1920, 1200, 75, 0 }, + { 1920, 1200, 85, 0 }, + { 1920, 1440, 60, 0 }, + { 1920, 1440, 75, 0 }, +}; + +static const struct minimode extra_modes[] = { + { 1024, 576, 60, 0 }, + { 1366, 768, 60, 0 }, + { 1600, 900, 60, 0 }, + { 1680, 945, 60, 0 }, + { 1920, 1080, 60, 0 }, + { 2048, 1152, 60, 0 }, + { 2048, 1536, 60, 0 }, +}; + +/* + * Probably taken from CEA-861 spec. + * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. + * + * Index using the VIC. + */ +static const struct drm_display_mode edid_cea_modes[] = { + /* 0 - dummy, VICs start at 1 */ + { }, + /* 1 - 640x480@60Hz 4:3 */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, + 752, 800, 0, 480, 490, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 2 - 720x480@60Hz 4:3 */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 3 - 720x480@60Hz 16:9 */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 4 - 1280x720@60Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 5 - 1920x1080i@60Hz 16:9 */ + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 6 - 720(1440)x480i@60Hz 4:3 */ + { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, + 801, 858, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 7 - 720(1440)x480i@60Hz 16:9 */ + { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, + 801, 858, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 8 - 720(1440)x240@60Hz 4:3 */ + { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, + 801, 858, 0, 240, 244, 247, 262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 9 - 720(1440)x240@60Hz 16:9 */ + { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739, + 801, 858, 0, 
240, 244, 247, 262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 10 - 2880x480i@60Hz 4:3 */ + { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + 3204, 3432, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 11 - 2880x480i@60Hz 16:9 */ + { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + 3204, 3432, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 12 - 2880x240@60Hz 4:3 */ + { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + 3204, 3432, 0, 240, 244, 247, 262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 13 - 2880x240@60Hz 16:9 */ + { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + 3204, 3432, 0, 240, 244, 247, 262, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 14 - 1440x480@60Hz 4:3 */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, + 1596, 1716, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 15 - 1440x480@60Hz 16:9 */ + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, + 1596, 1716, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 16 - 1920x1080@60Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 17 - 720x576@50Hz 4:3 */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 18 - 720x576@50Hz 16:9 */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 19 - 1280x720@50Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 20 - 1920x1080i@50Hz 16:9 */ + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 21 - 720(1440)x576i@50Hz 4:3 */ + { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, + 795, 864, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 22 - 720(1440)x576i@50Hz 16:9 */ + { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, + 795, 864, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | 
DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 23 - 720(1440)x288@50Hz 4:3 */ + { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, + 795, 864, 0, 288, 290, 293, 312, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 24 - 720(1440)x288@50Hz 16:9 */ + { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732, + 795, 864, 0, 288, 290, 293, 312, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 25 - 2880x576i@50Hz 4:3 */ + { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + 3180, 3456, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 26 - 2880x576i@50Hz 16:9 */ + { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + 3180, 3456, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 27 - 2880x288@50Hz 4:3 */ + { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + 3180, 3456, 0, 288, 290, 293, 312, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 28 - 2880x288@50Hz 16:9 */ + { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + 3180, 3456, 0, 288, 290, 293, 312, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 29 - 1440x576@50Hz 4:3 */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, + 1592, 1728, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 30 - 1440x576@50Hz 16:9 */ + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, + 1592, 1728, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 31 - 1920x1080@50Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 32 - 1920x1080@24Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 33 - 1920x1080@25Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 34 - 1920x1080@30Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 35 - 2880x480@60Hz 4:3 */ + { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, + 3192, 3432, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh 
= 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 36 - 2880x480@60Hz 16:9 */ + { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, + 3192, 3432, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 37 - 2880x576@50Hz 4:3 */ + { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, + 3184, 3456, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 38 - 2880x576@50Hz 16:9 */ + { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, + 3184, 3456, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 39 - 1920x1080i@50Hz 16:9 */ + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, + 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 40 - 1920x1080i@100Hz 16:9 */ + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 41 - 1280x720@100Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 42 - 720x576@100Hz 4:3 */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 43 - 720x576@100Hz 16:9 */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 44 - 720(1440)x576i@100Hz 4:3 */ + { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, + 795, 864, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 45 - 720(1440)x576i@100Hz 16:9 */ + { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, + 795, 864, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 46 - 1920x1080i@120Hz 16:9 */ + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_INTERLACE), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 47 - 1280x720@120Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 48 - 720x480@120Hz 4:3 */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 
120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 49 - 720x480@120Hz 16:9 */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 50 - 720(1440)x480i@120Hz 4:3 */ + { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739, + 801, 858, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 51 - 720(1440)x480i@120Hz 16:9 */ + { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739, + 801, 858, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 52 - 720x576@200Hz 4:3 */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 53 - 720x576@200Hz 16:9 */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 54 - 720(1440)x576i@200Hz 4:3 */ + { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, + 795, 864, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 55 - 720(1440)x576i@200Hz 16:9 */ + { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, + 795, 864, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 56 - 720x480@240Hz 4:3 */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 57 - 720x480@240Hz 16:9 */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 58 - 720(1440)x480i@240Hz 4:3 */ + { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739, + 801, 858, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 59 - 720(1440)x480i@240Hz 16:9 */ + { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739, + 801, 858, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), + .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 60 - 1280x720@24Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 61 - 1280x720@25Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, + 3740, 
3960, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 62 - 1280x720@30Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 63 - 1920x1080@120Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 64 - 1920x1080@100Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 65 - 1280x720@24Hz 64:27 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 66 - 1280x720@25Hz 64:27 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, + 3740, 3960, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 67 - 1280x720@30Hz 64:27 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 68 - 1280x720@50Hz 64:27 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 69 - 1280x720@60Hz 64:27 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 70 - 1280x720@100Hz 64:27 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 71 - 1280x720@120Hz 64:27 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 72 - 1920x1080@24Hz 64:27 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 73 - 1920x1080@25Hz 64:27 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 74 - 1920x1080@30Hz 64:27 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, 
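+	/*
+	 * The .vrefresh annotations above and below can be cross-checked
+	 * against the timings (an illustrative sanity check, not part of
+	 * the imported table): refresh = clock in kHz * 1000 /
+	 * (htotal * vtotal); for VIC 74, 74250 * 1000 / (2200 * 1125)
+	 * = 30 Hz.
+	 */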
+ /* 75 - 1920x1080@50Hz 64:27 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 76 - 1920x1080@60Hz 64:27 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 77 - 1920x1080@100Hz 64:27 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 78 - 1920x1080@120Hz 64:27 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 79 - 1680x720@24Hz 64:27 */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 80 - 1680x720@25Hz 64:27 */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908, + 2948, 3168, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 81 - 1680x720@30Hz 64:27 */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380, + 2420, 2640, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 82 - 1680x720@50Hz 64:27 */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940, + 1980, 2200, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 83 - 1680x720@60Hz 64:27 */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940, + 1980, 2200, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 84 - 1680x720@100Hz 64:27 */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740, + 1780, 2000, 0, 720, 725, 730, 825, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 85 - 1680x720@120Hz 64:27 */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740, + 1780, 2000, 0, 720, 725, 730, 825, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 86 - 2560x1080@24Hz 64:27 */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558, + 3602, 3750, 0, 1080, 1084, 1089, 1100, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 87 - 2560x1080@25Hz 64:27 */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008, + 3052, 3200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 88 - 2560x1080@30Hz 64:27 */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328, + 3372, 3520, 0, 1080, 1084, 1089, 
1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 89 - 2560x1080@50Hz 64:27 */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108, + 3152, 3300, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 90 - 2560x1080@60Hz 64:27 */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808, + 2852, 3000, 0, 1080, 1084, 1089, 1100, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 91 - 2560x1080@100Hz 64:27 */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778, + 2822, 2970, 0, 1080, 1084, 1089, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 92 - 2560x1080@120Hz 64:27 */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108, + 3152, 3300, 0, 1080, 1084, 1089, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 93 - 3840x2160@24Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 94 - 3840x2160@25Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 95 - 3840x2160@30Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 96 - 3840x2160@50Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 97 - 3840x2160@60Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 98 - 4096x2160@24Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 99 - 4096x2160@25Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064, + 5152, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 100 - 4096x2160@30Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184, + 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 101 - 4096x2160@50Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064, + 5152, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, 
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 102 - 4096x2160@60Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184, + 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 103 - 3840x2160@24Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 104 - 3840x2160@25Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 105 - 3840x2160@30Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 106 - 3840x2160@50Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 107 - 3840x2160@60Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, +}; + +/* + * HDMI 1.4 4k modes. Index using the VIC. + */ +static const struct drm_display_mode edid_4k_modes[] = { + /* 0 - dummy, VICs start at 1 */ + { }, + /* 1 - 3840x2160@30Hz */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, + 3840, 4016, 4104, 4400, 0, + 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, }, + /* 2 - 3840x2160@25Hz */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, + 3840, 4896, 4984, 5280, 0, + 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, }, + /* 3 - 3840x2160@24Hz */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, + 3840, 5116, 5204, 5500, 0, + 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, }, + /* 4 - 4096x2160@24Hz (SMPTE) */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, + 4096, 5116, 5204, 5500, 0, + 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, }, +}; + +/*** DDC fetch and block validation ***/ + +static const u8 edid_header[] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 +}; + +/** + * drm_edid_header_is_valid - sanity check the header of the base EDID block + * @raw_edid: pointer to raw base EDID block + * + * Sanity check the header of the base EDID block. + * + * Return: 8 if the header is perfect, down to 0 if it's totally wrong. 
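+ *
+ * For example (illustrative): a block starting with
+ * 00 ff ff ff ff ff 7f 00 matches the canonical header in seven of
+ * eight positions, so it scores 7 and, with the default edid_fixup
+ * threshold of 6, is repaired rather than rejected.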
+ */ +int drm_edid_header_is_valid(const u8 *raw_edid) +{ + int i, score = 0; + + for (i = 0; i < sizeof(edid_header); i++) + if (raw_edid[i] == edid_header[i]) + score++; + + return score; +} +EXPORT_SYMBOL(drm_edid_header_is_valid); + +static int edid_fixup __read_mostly = 6; +module_param_named(edid_fixup, edid_fixup, int, 0400); +MODULE_PARM_DESC(edid_fixup, + "Minimum number of valid EDID header bytes (0-8, default 6)"); + +static void drm_get_displayid(struct drm_connector *connector, + struct edid *edid); +static int validate_displayid(u8 *displayid, int length, int idx); + +static int drm_edid_block_checksum(const u8 *raw_edid) +{ + int i; + u8 csum = 0; + for (i = 0; i < EDID_LENGTH; i++) + csum += raw_edid[i]; + + return csum; +} + +static bool drm_edid_is_zero(const u8 *in_edid, int length) +{ + if (memchr_inv(in_edid, 0, length)) + return false; + + return true; +} + +/** + * drm_edid_block_valid - Sanity check the EDID block (base or extension) + * @raw_edid: pointer to raw EDID block + * @block: type of block to validate (0 for base, extension otherwise) + * @print_bad_edid: if true, dump bad EDID blocks to the console + * @edid_corrupt: if true, the header or checksum is invalid + * + * Validate a base or extension EDID block and optionally dump bad blocks to + * the console. + * + * Return: True if the block is valid, false otherwise. + */ +bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid, + bool *edid_corrupt) +{ + u8 csum; + struct edid *edid = (struct edid *)raw_edid; + + if (WARN_ON(!raw_edid)) + return false; + + if (edid_fixup > 8 || edid_fixup < 0) + edid_fixup = 6; + + if (block == 0) { + int score = drm_edid_header_is_valid(raw_edid); + if (score == 8) { + if (edid_corrupt) + *edid_corrupt = false; + } else if (score >= edid_fixup) { + /* Displayport Link CTS Core 1.2 rev1.1 test 4.2.2.6 + * The corrupt flag needs to be set here otherwise, the + * fix-up code here will correct the problem, the + * checksum is correct and the test fails + */ + if (edid_corrupt) + *edid_corrupt = true; + DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); + memcpy(raw_edid, edid_header, sizeof(edid_header)); + } else { + if (edid_corrupt) + *edid_corrupt = true; + goto bad; + } + } + + csum = drm_edid_block_checksum(raw_edid); + if (csum) { + if (edid_corrupt) + *edid_corrupt = true; + + /* allow CEA to slide through, switches mangle this */ + if (raw_edid[0] == CEA_EXT) { + DRM_DEBUG("EDID checksum is invalid, remainder is %d\n", csum); + DRM_DEBUG("Assuming a KVM switch modified the CEA block but left the original checksum\n"); + } else { + if (print_bad_edid) + DRM_NOTE("EDID checksum is invalid, remainder is %d\n", csum); + + goto bad; + } + } + + /* per-block-type checks */ + switch (raw_edid[0]) { + case 0: /* base */ + if (edid->version != 1) { + DRM_NOTE("EDID has major version %d, instead of 1\n", edid->version); + goto bad; + } + + if (edid->revision > 4) + DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); + break; + + default: + break; + } + + return true; + +bad: + if (print_bad_edid) { + if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) { + pr_notice("EDID block is all zeroes\n"); + } else { + pr_notice("Raw EDID:\n"); + print_hex_dump(KERN_NOTICE, + " \t", DUMP_PREFIX_NONE, 16, 1, + raw_edid, EDID_LENGTH, false); + } + } + return false; +} +EXPORT_SYMBOL(drm_edid_block_valid); + +/** + * drm_edid_is_valid - sanity check EDID data + * @edid: EDID data + * + * Sanity-check an entire EDID record (including extensions) + * + * 
Return: True if the EDID data is valid, false otherwise. + */ +bool drm_edid_is_valid(struct edid *edid) +{ + int i; + u8 *raw = (u8 *)edid; + + if (!edid) + return false; + + for (i = 0; i <= edid->extensions; i++) + if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL)) + return false; + + return true; +} +EXPORT_SYMBOL(drm_edid_is_valid); + +#define DDC_SEGMENT_ADDR 0x30 +/** + * drm_do_probe_ddc_edid() - get EDID information via I2C + * @data: I2C device adapter + * @buf: EDID data buffer to be filled + * @block: 128 byte EDID block to start fetching from + * @len: EDID data buffer length to fetch + * + * Try to fetch EDID information by calling I2C driver functions. + * + * Return: 0 on success or -1 on failure. + */ +static int +drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct i2c_adapter *adapter = data; + unsigned char start = block * EDID_LENGTH; + unsigned char segment = block >> 1; + unsigned char xfers = segment ? 3 : 2; + int ret, retries = 5; + + /* + * The core I2C driver will automatically retry the transfer if the + * adapter reports EAGAIN. However, we find that bit-banging transfers + * are susceptible to errors under a heavily loaded machine and + * generate spurious NAKs and timeouts. Retrying the transfer + * of the individual block a few times seems to overcome this. + */ + do { + struct i2c_msg msgs[] = { + { + .addr = DDC_SEGMENT_ADDR, + .flags = 0, + .len = 1, + .buf = &segment, + }, { + .addr = DDC_ADDR, + .flags = 0, + .len = 1, + .buf = &start, + }, { + .addr = DDC_ADDR, + .flags = I2C_M_RD, + .len = len, + .buf = buf, + } + }; + + /* + * Avoid sending the segment addr to not upset non-compliant + * DDC monitors. + */ + ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers); + + if (ret == -ENXIO) { + DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n", + adapter->name); + break; + } + } while (ret != xfers && --retries); + + return ret == xfers ? 0 : -1; +} + +static void connector_bad_edid(struct drm_connector *connector, + u8 *edid, int num_blocks) +{ + int i; + + if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS)) + return; + + dev_warn(connector->dev->dev, + "%s: EDID is invalid:\n", + connector->name); + for (i = 0; i < num_blocks; i++) { + u8 *block = edid + i * EDID_LENGTH; + char prefix[20]; + + if (drm_edid_is_zero(block, EDID_LENGTH)) + sprintf(prefix, "\t[%02x] ZERO ", i); + else if (!drm_edid_block_valid(block, i, false, NULL)) + sprintf(prefix, "\t[%02x] BAD ", i); + else + sprintf(prefix, "\t[%02x] GOOD ", i); + + print_hex_dump(KERN_WARNING, + prefix, DUMP_PREFIX_NONE, 16, 1, + block, EDID_LENGTH, false); + } +} + +/* Get override or firmware EDID */ +static struct edid *drm_get_override_edid(struct drm_connector *connector) +{ + struct edid *override = NULL; + + if (connector->override_edid) + override = drm_edid_duplicate(connector->edid_blob_ptr->data); + + if (!override) + override = drm_load_edid_firmware(connector); + + return IS_ERR(override) ? NULL : override; +} + +/** + * drm_add_override_edid_modes - add modes from override/firmware EDID + * @connector: connector we're probing + * + * Add modes from the override/firmware EDID, if available. Only to be used from + * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe + * failed during drm_get_edid() and caused the override/firmware EDID to be + * skipped. + * + * Return: The number of modes added or 0 if we couldn't find any. 
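+ *
+ * An illustrative caller sketch (the probe helper's actual code is not
+ * part of this hunk):
+ *
+ *	count = drm_add_edid_modes(connector, edid);
+ *	if (count == 0 && edid == NULL)
+ *		count = drm_add_override_edid_modes(connector);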
+ */
+int drm_add_override_edid_modes(struct drm_connector *connector)
+{
+	struct edid *override;
+	int num_modes = 0;
+
+	override = drm_get_override_edid(connector);
+	if (override) {
+		drm_connector_update_edid_property(connector, override);
+		num_modes = drm_add_edid_modes(connector, override);
+		kfree(override);
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
+			      connector->base.id, connector->name, num_modes);
+	}
+
+	return num_modes;
+}
+EXPORT_SYMBOL(drm_add_override_edid_modes);
+
+/**
+ * drm_do_get_edid - get EDID data using a custom EDID block read function
+ * @connector: connector we're probing
+ * @get_edid_block: EDID block read function
+ * @data: private data passed to the block read function
+ *
+ * When the I2C adapter connected to the DDC bus is hidden behind a device that
+ * exposes a different interface to read EDID blocks this function can be used
+ * to get EDID data using a custom block read function.
+ *
+ * As in the general case the DDC bus is accessible by the kernel at the I2C
+ * level, drivers must make all reasonable efforts to expose it as an I2C
+ * adapter and use drm_get_edid() instead of abusing this function.
+ *
+ * The EDID may be overridden using debugfs override_edid or firmware EDID
+ * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * order. Having either of them bypasses actual EDID reads.
+ *
+ * Return: Pointer to valid EDID or NULL if we couldn't find any.
+ */
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+	int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+			      size_t len),
+	void *data)
+{
+	int i, j = 0, valid_extensions = 0;
+	u8 *edid, *new;
+	struct edid *override;
+
+	override = drm_get_override_edid(connector);
+	if (override)
+		return override;
+
+	if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+		return NULL;
+
+	/* base block fetch */
+	for (i = 0; i < 4; i++) {
+		if (get_edid_block(data, edid, 0, EDID_LENGTH))
+			goto out;
+		if (drm_edid_block_valid(edid, 0, false,
+					 &connector->edid_corrupt))
+			break;
+		if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
+			connector->null_edid_counter++;
+			goto carp;
+		}
+	}
+	if (i == 4)
+		goto carp;
+
+	/* if there are no extensions, we're done */
+	valid_extensions = edid[0x7e];
+	if (valid_extensions == 0)
+		return (struct edid *)edid;
+
+	new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+	if (!new)
+		goto out;
+	edid = new;
+
+	for (j = 1; j <= edid[0x7e]; j++) {
+		u8 *block = edid + j * EDID_LENGTH;
+
+		for (i = 0; i < 4; i++) {
+			if (get_edid_block(data, block, j, EDID_LENGTH))
+				goto out;
+			if (drm_edid_block_valid(block, j, false, NULL))
+				break;
+		}
+
+		if (i == 4)
+			valid_extensions--;
+	}
+
+	if (valid_extensions != edid[0x7e]) {
+		u8 *base;
+
+		connector_bad_edid(connector, edid, edid[0x7e] + 1);
+
+		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
+		edid[0x7e] = valid_extensions;
+
+		new = kmalloc_array(valid_extensions + 1, EDID_LENGTH,
+				    GFP_KERNEL);
+		if (!new)
+			goto out;
+
+		base = new;
+		for (i = 0; i <= edid[0x7e]; i++) {
+			u8 *block = edid + i * EDID_LENGTH;
+
+			if (!drm_edid_block_valid(block, i, false, NULL))
+				continue;
+
+			memcpy(base, block, EDID_LENGTH);
+			base += EDID_LENGTH;
+		}
+
+		kfree(edid);
+		edid = new;
+	}
+
+	return (struct edid *)edid;
+
+carp:
+	connector_bad_edid(connector, edid, 1);
+out:
+	kfree(edid);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_do_get_edid);
+
+/**
+ * drm_probe_ddc() - probe DDC presence
+ * @adapter:
I2C adapter to probe + * + * Return: True on success, false on failure. + */ +bool +drm_probe_ddc(struct i2c_adapter *adapter) +{ + unsigned char out; + + return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); +} +EXPORT_SYMBOL(drm_probe_ddc); + +/** + * drm_get_edid - get EDID data, if available + * @connector: connector we're probing + * @adapter: I2C adapter to use for DDC + * + * Poke the given I2C channel to grab EDID data if possible. If found, + * attach it to the connector. + * + * Return: Pointer to valid EDID or NULL if we couldn't find any. + */ +struct edid *drm_get_edid(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct edid *edid; + + if (connector->force == DRM_FORCE_OFF) + return NULL; + + if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter)) + return NULL; + + edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter); + if (edid) + drm_get_displayid(connector, edid); + return edid; +} +EXPORT_SYMBOL(drm_get_edid); + +#ifdef __linux__ +/** + * drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output + * @connector: connector we're probing + * @adapter: I2C adapter to use for DDC + * + * Wrapper around drm_get_edid() for laptops with dual GPUs using one set of + * outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily + * switch DDC to the GPU which is retrieving EDID. + * + * Return: Pointer to valid EDID or %NULL if we couldn't find any. + */ +struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct pci_dev *pdev = connector->dev->pdev; + struct edid *edid; + + vga_switcheroo_lock_ddc(pdev); + edid = drm_get_edid(connector, adapter); + vga_switcheroo_unlock_ddc(pdev); + + return edid; +} +EXPORT_SYMBOL(drm_get_edid_switcheroo); +#endif + +/** + * drm_edid_duplicate - duplicate an EDID and the extensions + * @edid: EDID to duplicate + * + * Return: Pointer to duplicated EDID or NULL on allocation failure. + */ +struct edid *drm_edid_duplicate(const struct edid *edid) +{ + return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL); +} +EXPORT_SYMBOL(drm_edid_duplicate); + +/*** EDID parsing ***/ + +/** + * edid_vendor - match a string against EDID's obfuscated vendor field + * @edid: EDID to match + * @vendor: vendor string + * + * Returns true if @vendor is in @edid, false otherwise + */ +static bool edid_vendor(const struct edid *edid, const char *vendor) +{ + char edid_vendor[3]; + + edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; + edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | + ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; + edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@'; + + return !strncmp(edid_vendor, vendor, 3); +} + +/** + * edid_get_quirks - return quirk flags for a given EDID + * @edid: EDID to process + * + * This tells subsequent routines what fixes they need to apply. 
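+ *
+ * Entries in edid_quirk_list pair the decoded three-letter PNP vendor
+ * code and product id with quirk flags, along the lines of this
+ * hypothetical entry (not from the real list):
+ *
+ *	{ "XYZ", 0x1234, EDID_QUIRK_PREFER_LARGE_60 },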
+ */ +static u32 edid_get_quirks(const struct edid *edid) +{ + const struct edid_quirk *quirk; + int i; + + for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) { + quirk = &edid_quirk_list[i]; + + if (edid_vendor(edid, quirk->vendor) && + (EDID_PRODUCT_ID(edid) == quirk->product_id)) + return quirk->quirks; + } + + return 0; +} + +#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay) +#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t))) + +/** + * edid_fixup_preferred - set preferred modes based on quirk list + * @connector: has mode list to fix up + * @quirks: quirks list + * + * Walk the mode list for @connector, clearing the preferred status + * on existing modes and setting it anew for the right mode ala @quirks. + */ +static void edid_fixup_preferred(struct drm_connector *connector, + u32 quirks) +{ + struct drm_display_mode *t, *cur_mode, *preferred_mode; + int target_refresh = 0; + int cur_vrefresh, preferred_vrefresh; + + if (list_empty(&connector->probed_modes)) + return; + + if (quirks & EDID_QUIRK_PREFER_LARGE_60) + target_refresh = 60; + if (quirks & EDID_QUIRK_PREFER_LARGE_75) + target_refresh = 75; + + preferred_mode = list_first_entry(&connector->probed_modes, + struct drm_display_mode, head); + + list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) { + cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED; + + if (cur_mode == preferred_mode) + continue; + + /* Largest mode is preferred */ + if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) + preferred_mode = cur_mode; + + cur_vrefresh = cur_mode->vrefresh ? + cur_mode->vrefresh : drm_mode_vrefresh(cur_mode); + preferred_vrefresh = preferred_mode->vrefresh ? + preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode); + /* At a given size, try to get closest to target refresh */ + if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && + MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) < + MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) { + preferred_mode = cur_mode; + } + } + + preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; +} + +static bool +mode_is_rb(const struct drm_display_mode *mode) +{ + return (mode->htotal - mode->hdisplay == 160) && + (mode->hsync_end - mode->hdisplay == 80) && + (mode->hsync_end - mode->hsync_start == 32) && + (mode->vsync_start - mode->vdisplay == 3); +} + +/* + * drm_mode_find_dmt - Create a copy of a mode if present in DMT + * @dev: Device to duplicate against + * @hsize: Mode width + * @vsize: Mode height + * @fresh: Mode refresh rate + * @rb: Mode reduced-blanking-ness + * + * Walk the DMT mode list looking for a match for the given parameters. + * + * Return: A newly allocated copy of the mode, or NULL if not found. 
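+ *
+ * For instance (illustrative values): a request for 1920x1200 at 60 Hz
+ * with reduced blanking,
+ *
+ *	mode = drm_mode_find_dmt(dev, 1920, 1200, 60, true);
+ *
+ * matches the 154 MHz "1920x1200@60Hz RB" DMT entry above and returns
+ * a newly allocated duplicate of it.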
+ */ +struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, + int hsize, int vsize, int fresh, + bool rb) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) { + const struct drm_display_mode *ptr = &drm_dmt_modes[i]; + if (hsize != ptr->hdisplay) + continue; + if (vsize != ptr->vdisplay) + continue; + if (fresh != drm_mode_vrefresh(ptr)) + continue; + if (rb != mode_is_rb(ptr)) + continue; + + return drm_mode_duplicate(dev, ptr); + } + + return NULL; +} +EXPORT_SYMBOL(drm_mode_find_dmt); + +typedef void detailed_cb(struct detailed_timing *timing, void *closure); + +static void +cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) +{ + int i, n = 0; + u8 d = ext[0x02]; + u8 *det_base = ext + d; + + n = (127 - d) / 18; + for (i = 0; i < n; i++) + cb((struct detailed_timing *)(det_base + 18 * i), closure); +} + +static void +vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) +{ + unsigned int i, n = min((int)ext[0x02], 6); + u8 *det_base = ext + 5; + + if (ext[0x01] != 1) + return; /* unknown version */ + + for (i = 0; i < n; i++) + cb((struct detailed_timing *)(det_base + 18 * i), closure); +} + +static void +drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) +{ + int i; + struct edid *edid = (struct edid *)raw_edid; + + if (edid == NULL) + return; + + for (i = 0; i < EDID_DETAILED_TIMINGS; i++) + cb(&(edid->detailed_timings[i]), closure); + + for (i = 1; i <= raw_edid[0x7e]; i++) { + u8 *ext = raw_edid + (i * EDID_LENGTH); + switch (*ext) { + case CEA_EXT: + cea_for_each_detailed_block(ext, cb, closure); + break; + case VTB_EXT: + vtb_for_each_detailed_block(ext, cb, closure); + break; + default: + break; + } + } +} + +static void +is_rb(struct detailed_timing *t, void *data) +{ + u8 *r = (u8 *)t; + if (r[3] == EDID_DETAIL_MONITOR_RANGE) + if (r[15] & 0x10) + *(bool *)data = true; +} + +/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ +static bool +drm_monitor_supports_rb(struct edid *edid) +{ + if (edid->revision >= 4) { + bool ret = false; + drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); + return ret; + } + + return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); +} + +static void +find_gtf2(struct detailed_timing *t, void *data) +{ + u8 *r = (u8 *)t; + if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) + *(u8 **)data = r; +} + +/* Secondary GTF curve kicks in above some break frequency */ +static int +drm_gtf2_hbreak(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? (r[12] * 2) : 0; +} + +static int +drm_gtf2_2c(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? r[13] : 0; +} + +static int +drm_gtf2_m(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? (r[15] << 8) + r[14] : 0; +} + +static int +drm_gtf2_k(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? r[16] : 0; +} + +static int +drm_gtf2_2j(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? r[17] : 0; +} + +/** + * standard_timing_level - get std. 
timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+	if (edid->revision >= 2) {
+		if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+			return LEVEL_CVT;
+		if (drm_gtf2_hbreak(edid))
+			return LEVEL_GTF2;
+		return LEVEL_GTF;
+	}
+	return LEVEL_DMT;
+}
+
+/*
+ * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
+ * monitors fill with ASCII space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+	return (a == 0x00 && b == 0x00) ||
+	       (a == 0x01 && b == 0x01) ||
+	       (a == 0x20 && b == 0x20);
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector for the EDID block
+ * @edid: EDID block to scan
+ * @t: standard timing params
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ */
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+	     struct std_timing *t)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *m, *mode = NULL;
+	int hsize, vsize;
+	int vrefresh_rate;
+	unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+		>> EDID_TIMING_ASPECT_SHIFT;
+	unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+		>> EDID_TIMING_VFREQ_SHIFT;
+	int timing_level = standard_timing_level(edid);
+
+	if (bad_std_timing(t->hsize, t->vfreq_aspect))
+		return NULL;
+
+	/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+	hsize = t->hsize * 8 + 248;
+	/* vrefresh_rate = vfreq + 60 */
+	vrefresh_rate = vfreq + 60;
+	/* the vdisplay is calculated based on the aspect ratio */
+	if (aspect_ratio == 0) {
+		if (edid->revision < 3)
+			vsize = hsize;
+		else
+			vsize = (hsize * 10) / 16;
+	} else if (aspect_ratio == 1)
+		vsize = (hsize * 3) / 4;
+	else if (aspect_ratio == 2)
+		vsize = (hsize * 4) / 5;
+	else
+		vsize = (hsize * 9) / 16;
+
+	/* HDTV hack, part 1 */
+	if (vrefresh_rate == 60 &&
+	    ((hsize == 1360 && vsize == 765) ||
+	     (hsize == 1368 && vsize == 769))) {
+		hsize = 1366;
+		vsize = 768;
+	}
+
+	/*
+	 * If this connector already has a mode for this size and refresh
+	 * rate (because it came from detailed or CVT info), use that
+	 * instead. This way we don't have to guess at interlace or
+	 * reduced blanking.
+	 */
+	list_for_each_entry(m, &connector->probed_modes, head)
+		if (m->hdisplay == hsize && m->vdisplay == vsize &&
+		    drm_mode_vrefresh(m) == vrefresh_rate)
+			return NULL;
+
+	/* HDTV hack, part 2 */
+	if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+		mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
+				    false);
+		if (!mode)
+			return NULL;
+		mode->hdisplay = 1366;
+		mode->hsync_start = mode->hsync_start - 1;
+		mode->hsync_end = mode->hsync_end - 1;
+		return mode;
+	}
+
+	/* check whether it can be found in default mode table */
+	if (drm_monitor_supports_rb(edid)) {
+		mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+					 true);
+		if (mode)
+			return mode;
+	}
+	mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
+	if (mode)
+		return mode;
+
+	/* okay, generate it */
+	switch (timing_level) {
+	case LEVEL_DMT:
+		break;
+	case LEVEL_GTF:
+		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		break;
+	case LEVEL_GTF2:
+		/*
+		 * This is potentially wrong if there's ever a monitor with
+		 * more than one ranges section, each claiming a different
+		 * secondary GTF curve. Please don't do that.
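To make the decode rules above concrete, this is how a 2-byte standard timing descriptor expands, following the same hsize * 8 + 248, aspect-table, and vfreq + 60 arithmetic (standalone sketch; the byte values and names are illustrative, not part of this patch):

#include <stdio.h>

int main(void)
{
	unsigned char b0 = 0x81, b1 = 0x80;	/* hypothetical descriptor */
	int hsize = b0 * 8 + 248;		/* 129 * 8 + 248 = 1280 */
	int vfreq = (b1 & 0x3f) + 60;		/* 0 + 60 = 60 Hz */
	int vsize;

	switch (b1 >> 6) {			/* aspect ratio bits 7-6 */
	case 0:  vsize = hsize * 10 / 16; break; /* 16:10 (EDID >= 1.3) */
	case 1:  vsize = hsize * 3 / 4;   break; /* 4:3 */
	case 2:  vsize = hsize * 4 / 5;   break; /* 5:4 */
	default: vsize = hsize * 9 / 16;  break; /* 16:9 */
	}

	printf("%dx%d@%d\n", hsize, vsize, vfreq); /* 1280x1024@60 */
	return 0;
}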
+ */ + mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); + if (!mode) + return NULL; + if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { + drm_mode_destroy(dev, mode); + mode = drm_gtf_mode_complex(dev, hsize, vsize, + vrefresh_rate, 0, 0, + drm_gtf2_m(edid), + drm_gtf2_2c(edid), + drm_gtf2_k(edid), + drm_gtf2_2j(edid)); + } + break; + case LEVEL_CVT: + mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, + false); + break; + } + return mode; +} + +/* + * EDID is delightfully ambiguous about how interlaced modes are to be + * encoded. Our internal representation is of frame height, but some + * HDTV detailed timings are encoded as field height. + * + * The format list here is from CEA, in frame size. Technically we + * should be checking refresh rate too. Whatever. + */ +static void +drm_mode_do_interlace_quirk(struct drm_display_mode *mode, + struct detailed_pixel_timing *pt) +{ + int i; + static const struct { + int w, h; + } cea_interlaced[] = { + { 1920, 1080 }, + { 720, 480 }, + { 1440, 480 }, + { 2880, 480 }, + { 720, 576 }, + { 1440, 576 }, + { 2880, 576 }, + }; + + if (!(pt->misc & DRM_EDID_PT_INTERLACED)) + return; + + for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) { + if ((mode->hdisplay == cea_interlaced[i].w) && + (mode->vdisplay == cea_interlaced[i].h / 2)) { + mode->vdisplay *= 2; + mode->vsync_start *= 2; + mode->vsync_end *= 2; + mode->vtotal *= 2; + mode->vtotal |= 1; + } + } + + mode->flags |= DRM_MODE_FLAG_INTERLACE; +} + +/** + * drm_mode_detailed - create a new mode from an EDID detailed timing section + * @dev: DRM device (needed to create new mode) + * @edid: EDID block + * @timing: EDID detailed timing info + * @quirks: quirks to apply + * + * An EDID detailed timing block contains enough info for us to create and + * return a new struct drm_display_mode. + */ +static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, + struct edid *edid, + struct detailed_timing *timing, + u32 quirks) +{ + struct drm_display_mode *mode; + struct detailed_pixel_timing *pt = &timing->data.pixel_data; + unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo; + unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; + unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; + unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; + unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; + unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); + + /* ignore tiny modes */ + if (hactive < 64 || vactive < 64) + return NULL; + + if (pt->misc & DRM_EDID_PT_STEREO) { + DRM_DEBUG_KMS("stereo mode not supported\n"); + return NULL; + } + if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { + DRM_DEBUG_KMS("composite sync not supported\n"); + } + + /* it is incorrect if hsync/vsync width is zero */ + if (!hsync_pulse_width || !vsync_pulse_width) { + DRM_DEBUG_KMS("Incorrect Detailed timing. 
" + "Wrong Hsync/Vsync pulse width\n"); + return NULL; + } + + if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) { + mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false); + if (!mode) + return NULL; + + goto set_size; + } + + mode = drm_mode_create(dev); + if (!mode) + return NULL; + + if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) + timing->pixel_clock = cpu_to_le16(1088); + + mode->clock = le16_to_cpu(timing->pixel_clock) * 10; + + mode->hdisplay = hactive; + mode->hsync_start = mode->hdisplay + hsync_offset; + mode->hsync_end = mode->hsync_start + hsync_pulse_width; + mode->htotal = mode->hdisplay + hblank; + + mode->vdisplay = vactive; + mode->vsync_start = mode->vdisplay + vsync_offset; + mode->vsync_end = mode->vsync_start + vsync_pulse_width; + mode->vtotal = mode->vdisplay + vblank; + + /* Some EDIDs have bogus h/vtotal values */ + if (mode->hsync_end > mode->htotal) + mode->htotal = mode->hsync_end + 1; + if (mode->vsync_end > mode->vtotal) + mode->vtotal = mode->vsync_end + 1; + + drm_mode_do_interlace_quirk(mode, pt); + + if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { + pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; + } + + mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? + DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? + DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; + +set_size: + mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; + mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; + + if (quirks & EDID_QUIRK_DETAILED_IN_CM) { + mode->width_mm *= 10; + mode->height_mm *= 10; + } + + if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) { + mode->width_mm = edid->width_cm * 10; + mode->height_mm = edid->height_cm * 10; + } + + mode->type = DRM_MODE_TYPE_DRIVER; + mode->vrefresh = drm_mode_vrefresh(mode); + drm_mode_set_name(mode); + + return mode; +} + +static bool +mode_in_hsync_range(const struct drm_display_mode *mode, + struct edid *edid, u8 *t) +{ + int hsync, hmin, hmax; + + hmin = t[7]; + if (edid->revision >= 4) + hmin += ((t[4] & 0x04) ? 255 : 0); + hmax = t[8]; + if (edid->revision >= 4) + hmax += ((t[4] & 0x08) ? 255 : 0); + hsync = drm_mode_hsync(mode); + + return (hsync <= hmax && hsync >= hmin); +} + +static bool +mode_in_vsync_range(const struct drm_display_mode *mode, + struct edid *edid, u8 *t) +{ + int vsync, vmin, vmax; + + vmin = t[5]; + if (edid->revision >= 4) + vmin += ((t[4] & 0x01) ? 255 : 0); + vmax = t[6]; + if (edid->revision >= 4) + vmax += ((t[4] & 0x02) ? 
255 : 0);
+	vsync = drm_mode_vrefresh(mode);
+
+	return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+	/* unspecified */
+	if (t[9] == 0 || t[9] == 255)
+		return 0;
+
+	/* 1.4 with CVT support gives us real precision, yay */
+	if (edid->revision >= 4 && t[10] == 0x04)
+		return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+	/* 1.3 is pathetic, so fuzz up a bit */
+	return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
+	      struct detailed_timing *timing)
+{
+	u32 max_clock;
+	u8 *t = (u8 *)timing;
+
+	if (!mode_in_hsync_range(mode, edid, t))
+		return false;
+
+	if (!mode_in_vsync_range(mode, edid, t))
+		return false;
+
+	if ((max_clock = range_pixel_clock(edid, t)))
+		if (mode->clock > max_clock)
+			return false;
+
+	/* 1.4 max horizontal check */
+	if (edid->revision >= 4 && t[10] == 0x04)
+		if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12] & 0x3))))
+			return false;
+
+	if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+		return false;
+
+	return true;
+}
+
+static bool valid_inferred_mode(const struct drm_connector *connector,
+				const struct drm_display_mode *mode)
+{
+	const struct drm_display_mode *m;
+	bool ok = false;
+
+	list_for_each_entry(m, &connector->probed_modes, head) {
+		if (mode->hdisplay == m->hdisplay &&
+		    mode->vdisplay == m->vdisplay &&
+		    drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+			return false; /* duplicated */
+		if (mode->hdisplay <= m->hdisplay &&
+		    mode->vdisplay <= m->vdisplay)
+			ok = true;
+	}
+	return ok;
+}
+
+static int
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+		if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+		    valid_inferred_mode(connector, drm_dmt_modes + i)) {
+			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
+/* fix up 1366x768 mode from 1368x768;
+ * GTF/CVT can't express 1366 width, which isn't divisible by 8
+ */
+void drm_mode_fixup_1366x768(struct drm_display_mode *mode)
+{
+	if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+		mode->hdisplay = 1366;
+		mode->hsync_start--;
+		mode->hsync_end--;
+		drm_mode_set_name(mode);
+	}
+}
+
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+		const struct minimode *m = &extra_modes[i];
+		newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+		if (!newmode)
+			return modes;
+
+		drm_mode_fixup_1366x768(newmode);
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
+			drm_mode_destroy(dev, newmode);
+			continue;
+		}
+
+		drm_mode_probed_add(connector, newmode);
+		modes++;
+	}
+
+	return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	bool rb = drm_monitor_supports_rb(edid);
+
+	for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+		const struct minimode *m = &extra_modes[i];
+		newmode = drm_cvt_mode(dev, m->w, m->h,
m->r, rb, 0, 0); + if (!newmode) + return modes; + + drm_mode_fixup_1366x768(newmode); + if (!mode_in_range(newmode, edid, timing) || + !valid_inferred_mode(connector, newmode)) { + drm_mode_destroy(dev, newmode); + continue; + } + + drm_mode_probed_add(connector, newmode); + modes++; + } + + return modes; +} + +static void +do_inferred_modes(struct detailed_timing *timing, void *c) +{ + struct detailed_mode_closure *closure = c; + struct detailed_non_pixel *data = &timing->data.other_data; + struct detailed_data_monitor_range *range = &data->data.range; + + if (data->type != EDID_DETAIL_MONITOR_RANGE) + return; + + closure->modes += drm_dmt_modes_for_range(closure->connector, + closure->edid, + timing); + + if (!version_greater(closure->edid, 1, 1)) + return; /* GTF not defined yet */ + + switch (range->flags) { + case 0x02: /* secondary gtf, XXX could do more */ + case 0x00: /* default gtf */ + closure->modes += drm_gtf_modes_for_range(closure->connector, + closure->edid, + timing); + break; + case 0x04: /* cvt, only in 1.4+ */ + if (!version_greater(closure->edid, 1, 3)) + break; + + closure->modes += drm_cvt_modes_for_range(closure->connector, + closure->edid, + timing); + break; + case 0x01: /* just the ranges, no formula */ + default: + break; + } +} + +static int +add_inferred_modes(struct drm_connector *connector, struct edid *edid) +{ + struct detailed_mode_closure closure = { + .connector = connector, + .edid = edid, + }; + + if (version_greater(edid, 1, 0)) + drm_for_each_detailed_block((u8 *)edid, do_inferred_modes, + &closure); + + return closure.modes; +} + +static int +drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) +{ + int i, j, m, modes = 0; + struct drm_display_mode *mode; + u8 *est = ((u8 *)timing) + 6; + + for (i = 0; i < 6; i++) { + for (j = 7; j >= 0; j--) { + m = (i * 8) + (7 - j); + if (m >= ARRAY_SIZE(est3_modes)) + break; + if (est[i] & (1 << j)) { + mode = drm_mode_find_dmt(connector->dev, + est3_modes[m].w, + est3_modes[m].h, + est3_modes[m].r, + est3_modes[m].rb); + if (mode) { + drm_mode_probed_add(connector, mode); + modes++; + } + } + } + } + + return modes; +} + +static void +do_established_modes(struct detailed_timing *timing, void *c) +{ + struct detailed_mode_closure *closure = c; + struct detailed_non_pixel *data = &timing->data.other_data; + + if (data->type == EDID_DETAIL_EST_TIMINGS) + closure->modes += drm_est3_modes(closure->connector, timing); +} + +/** + * add_established_modes - get est. modes from EDID and add them + * @connector: connector to add mode(s) to + * @edid: EDID block to scan + * + * Each EDID block contains a bitmap of the supported "established modes" list + * (defined above). Tease them out and add them to the global modes list. 
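For a feel of range_pixel_clock() above: the range descriptor stores the maximum pixel clock in units of 10 MHz, which EDID 1.4 CVT descriptors refine in 250 kHz steps while EDID 1.3 values get fuzzed upward. A standalone sketch of the same computation (hypothetical helper name, values illustrative):

#include <stdio.h>

/* Mirrors range_pixel_clock() above; returns kHz, 0 if unspecified. */
static unsigned range_max_clock_khz(int edid_rev, const unsigned char *t)
{
	if (t[9] == 0 || t[9] == 255)
		return 0;
	if (edid_rev >= 4 && t[10] == 0x04)		/* CVT: 250 kHz steps */
		return t[9] * 10000 - (t[12] >> 2) * 250;
	return t[9] * 10000 + 5001;			/* EDID 1.3: fuzz up */
}

int main(void)
{
	unsigned char t[18] = { 0 };

	t[9] = 17;					/* 17 x 10 MHz = 170 MHz */
	printf("%u kHz\n", range_max_clock_khz(3, t));	/* 175001 */
	return 0;
}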
+ */
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+	struct drm_device *dev = connector->dev;
+	unsigned long est_bits = edid->established_timings.t1 |
+		(edid->established_timings.t2 << 8) |
+		((edid->established_timings.mfg_rsvd & 0x80) << 9);
+	int i, modes = 0;
+	struct detailed_mode_closure closure = {
+		.connector = connector,
+		.edid = edid,
+	};
+
+	for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+		if (est_bits & (1<<i)) {
+			struct drm_display_mode *newmode;
+
+			newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	if (version_greater(edid, 1, 0))
+		drm_for_each_detailed_block((u8 *)edid,
+					    do_established_modes, &closure);
+
+	return modes + closure.modes;
+}
+
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+	struct detailed_mode_closure *closure = c;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+	struct drm_connector *connector = closure->connector;
+	struct edid *edid = closure->edid;
+
+	if (data->type == EDID_DETAIL_STD_MODES) {
+		int i;
+		for (i = 0; i < 6; i++) {
+			struct std_timing *std;
+			struct drm_display_mode *newmode;
+
+			std = &data->data.timings[i];
+			newmode = drm_mode_std(connector, edid, std);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				closure->modes++;
+			}
+		}
+	}
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector to add mode(s) to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
+ */
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+	int i, modes = 0;
+	struct detailed_mode_closure closure = {
+		.connector = connector,
+		.edid = edid,
+	};
+
+	for (i = 0; i < EDID_STD_TIMINGS; i++) {
+		struct drm_display_mode *newmode;
+
+		newmode = drm_mode_std(connector, edid,
+				       &edid->standard_timings[i]);
+		if (newmode) {
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+
+	if (version_greater(edid, 1, 0))
+		drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+					    &closure);
+
+	/* XXX should also look for standard codes in VTB blocks */
+
+	return modes + closure.modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+			 struct detailed_timing *timing)
+{
+	int i, j, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	struct cvt_timing *cvt;
+	const int rates[] = { 60, 85, 75, 60, 50 };
+	const u8 empty[3] = { 0, 0, 0 };
+
+	for (i = 0; i < 4; i++) {
+		int uninitialized_var(width), height;
+		cvt = &(timing->data.other_data.data.cvt[i]);
+
+		if (!memcmp(cvt->code, empty, 3))
+			continue;
+
+		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+		switch (cvt->code[1] & 0x0c) {
+		case 0x00:
+			width = height * 4 / 3;
+			break;
+		case 0x04:
+			width = height * 16 / 9;
+			break;
+		case 0x08:
+			width = height * 16 / 10;
+			break;
+		case 0x0c:
+			width = height * 15 / 9;
+			break;
+		}
+
+		for (j = 1; j < 5; j++) {
+			if (cvt->code[2] & (1 << j)) {
+				newmode = drm_cvt_mode(dev, width, height,
+						       rates[j], j == 0,
+						       false, false);
+				if (newmode) {
+					drm_mode_probed_add(connector, newmode);
+					modes++;
+				}
+			}
+		}
+	}
+
+	return modes;
+}
+
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
+{
+	struct detailed_mode_closure *closure = c;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+
+	if (data->type == EDID_DETAIL_CVT_3BYTE)
+		closure->modes += drm_cvt_modes(closure->connector, timing);
+}
+
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+	struct detailed_mode_closure closure = {
+		.connector = connector,
+		.edid = edid,
+	};
+
+	if (version_greater(edid, 1, 2))
+		drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
+
+	/* XXX should also look for CVT codes in VTB blocks */
+
+	return closure.modes;
+}
+
+static void
fixup_detailed_cea_mode_clock(struct drm_display_mode *mode); + +static void +do_detailed_mode(struct detailed_timing *timing, void *c) +{ + struct detailed_mode_closure *closure = c; + struct drm_display_mode *newmode; + + if (timing->pixel_clock) { + newmode = drm_mode_detailed(closure->connector->dev, + closure->edid, timing, + closure->quirks); + if (!newmode) + return; + + if (closure->preferred) + newmode->type |= DRM_MODE_TYPE_PREFERRED; + + /* + * Detailed modes are limited to 10kHz pixel clock resolution, + * so fix up anything that looks like CEA/HDMI mode, but the clock + * is just slightly off. + */ + fixup_detailed_cea_mode_clock(newmode); + + drm_mode_probed_add(closure->connector, newmode); + closure->modes++; + closure->preferred = false; + } +} + +/* + * add_detailed_modes - Add modes from detailed timings + * @connector: attached connector + * @edid: EDID block to scan + * @quirks: quirks to apply + */ +static int +add_detailed_modes(struct drm_connector *connector, struct edid *edid, + u32 quirks) +{ + struct detailed_mode_closure closure = { + .connector = connector, + .edid = edid, + .preferred = true, + .quirks = quirks, + }; + + if (closure.preferred && !version_greater(edid, 1, 3)) + closure.preferred = + (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); + + drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure); + + return closure.modes; +} + +#define AUDIO_BLOCK 0x01 +#define VIDEO_BLOCK 0x02 +#define VENDOR_BLOCK 0x03 +#define SPEAKER_BLOCK 0x04 +#define HDR_STATIC_METADATA_BLOCK 0x6 +#define USE_EXTENDED_TAG 0x07 +#define EXT_VIDEO_CAPABILITY_BLOCK 0x00 +#define EXT_VIDEO_DATA_BLOCK_420 0x0E +#define EXT_VIDEO_CAP_BLOCK_Y420CMDB 0x0F +#define EDID_BASIC_AUDIO (1 << 6) +#define EDID_CEA_YCRCB444 (1 << 5) +#define EDID_CEA_YCRCB422 (1 << 4) +#define EDID_CEA_VCDB_QS (1 << 6) + +/* + * Search EDID for CEA extension block. + */ +static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id) +{ + u8 *edid_ext = NULL; + int i; + + /* No EDID or EDID extensions */ + if (edid == NULL || edid->extensions == 0) + return NULL; + + /* Find CEA extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); + if (edid_ext[0] == ext_id) + break; + } + + if (i == edid->extensions) + return NULL; + + return edid_ext; +} + + +static u8 *drm_find_displayid_extension(const struct edid *edid) +{ + return drm_find_edid_extension(edid, DISPLAYID_EXT); +} + +static u8 *drm_find_cea_extension(const struct edid *edid) +{ + int ret; + int idx = 1; + int length = EDID_LENGTH; + struct displayid_block *block; + u8 *cea; + u8 *displayid; + + /* Look for a top level CEA extension block */ + cea = drm_find_edid_extension(edid, CEA_EXT); + if (cea) + return cea; + + /* CEA blocks can also be found embedded in a DisplayID block */ + displayid = drm_find_displayid_extension(edid); + if (!displayid) + return NULL; + + ret = validate_displayid(displayid, length, idx); + if (ret) + return NULL; + + idx += sizeof(struct displayid_hdr); + for_each_displayid_db(displayid, block, idx, length) { + if (block->tag == DATA_BLOCK_CTA) { + cea = (u8 *)block; + break; + } + } + + return cea; +} + +/* + * Calculate the alternate clock for the CEA mode + * (60Hz vs. 59.94Hz etc.) 
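Usage-wise, the extension lookup above boils down to walking the 128-byte blocks that follow the base EDID block and matching the tag byte. A rough standalone equivalent over a raw EDID buffer (no DisplayID handling, names illustrative):

#include <stddef.h>

#define EDID_LENGTH 128
#define CEA_EXT 0x02

/* Return a pointer to the first CEA extension block, or NULL. */
static const unsigned char *find_cea_ext(const unsigned char *edid)
{
	int i, count = edid[0x7e];	/* extension count in the base block */

	for (i = 0; i < count; i++) {
		const unsigned char *ext = edid + EDID_LENGTH * (i + 1);
		if (ext[0] == CEA_EXT)
			return ext;	/* ext[1] is the CEA revision */
	}
	return NULL;
}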
+ */ +static unsigned int +cea_mode_alternate_clock(const struct drm_display_mode *cea_mode) +{ + unsigned int clock = cea_mode->clock; + + if (cea_mode->vrefresh % 6 != 0) + return clock; + + /* + * edid_cea_modes contains the 59.94Hz + * variant for 240 and 480 line modes, + * and the 60Hz variant otherwise. + */ + if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480) + clock = DIV_ROUND_CLOSEST(clock * 1001, 1000); + else + clock = DIV_ROUND_CLOSEST(clock * 1000, 1001); + + return clock; +} + +static bool +cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode) +{ + /* + * For certain VICs the spec allows the vertical + * front porch to vary by one or two lines. + * + * cea_modes[] stores the variant with the shortest + * vertical front porch. We can adjust the mode to + * get the other variants by simply increasing the + * vertical front porch length. + */ +#ifdef __linux__ + BUILD_BUG_ON(edid_cea_modes[8].vtotal != 262 || + edid_cea_modes[9].vtotal != 262 || + edid_cea_modes[12].vtotal != 262 || + edid_cea_modes[13].vtotal != 262 || + edid_cea_modes[23].vtotal != 312 || + edid_cea_modes[24].vtotal != 312 || + edid_cea_modes[27].vtotal != 312 || + edid_cea_modes[28].vtotal != 312); +#endif + + if (((vic == 8 || vic == 9 || + vic == 12 || vic == 13) && mode->vtotal < 263) || + ((vic == 23 || vic == 24 || + vic == 27 || vic == 28) && mode->vtotal < 314)) { + mode->vsync_start++; + mode->vsync_end++; + mode->vtotal++; + + return true; + } + + return false; +} + +static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match, + unsigned int clock_tolerance) +{ + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; + u8 vic; + + if (!to_match->clock) + return 0; + + if (to_match->picture_aspect_ratio) + match_flags |= DRM_MODE_MATCH_ASPECT_RATIO; + + for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { + struct drm_display_mode cea_mode = edid_cea_modes[vic]; + unsigned int clock1, clock2; + + /* Check both 60Hz and 59.94Hz */ + clock1 = cea_mode.clock; + clock2 = cea_mode_alternate_clock(&cea_mode); + + if (abs(to_match->clock - clock1) > clock_tolerance && + abs(to_match->clock - clock2) > clock_tolerance) + continue; + + do { + if (drm_mode_match(to_match, &cea_mode, match_flags)) + return vic; + } while (cea_mode_alternate_timings(vic, &cea_mode)); + } + + return 0; +} + +/** + * drm_match_cea_mode - look for a CEA mode matching given mode + * @to_match: display mode + * + * Return: The CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861 + * mode. 
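The 1000/1001 scaling in cea_mode_alternate_clock() above is the usual NTSC-style frame-rate adjustment: 148500 kHz (1080p60) becomes 148352 kHz for the 59.94 Hz variant, while 240/480-line modes are stored as 59.94 Hz and scale the other way. A quick standalone check, with DIV_ROUND_CLOSEST open-coded:

#include <stdio.h>

static unsigned long div_round_closest(unsigned long n, unsigned long d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* 60 Hz -> 59.94 Hz: multiply by 1000/1001 */
	printf("%lu\n", div_round_closest(148500UL * 1000, 1001)); /* 148352 */
	/* 480-line modes store 59.94 Hz, so scale by 1001/1000 */
	printf("%lu\n", div_round_closest(27000UL * 1001, 1000));  /* 27027 */
	return 0;
}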
+ */
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
+{
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
+	u8 vic;
+
+	if (!to_match->clock)
+		return 0;
+
+	if (to_match->picture_aspect_ratio)
+		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
+	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
+		struct drm_display_mode cea_mode = edid_cea_modes[vic];
+		unsigned int clock1, clock2;
+
+		/* Check both 60Hz and 59.94Hz */
+		clock1 = cea_mode.clock;
+		clock2 = cea_mode_alternate_clock(&cea_mode);
+
+		if (KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock1) &&
+		    KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock2))
+			continue;
+
+		do {
+			if (drm_mode_match(to_match, &cea_mode, match_flags))
+				return vic;
+		} while (cea_mode_alternate_timings(vic, &cea_mode));
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
+static bool drm_valid_cea_vic(u8 vic)
+{
+	return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
+}
+
+/**
+ * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
+ * the input VIC from the CEA mode list
+ * @video_code: ID given to each of the CEA modes
+ *
+ * Returns picture aspect ratio
+ */
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
+{
+	return edid_cea_modes[video_code].picture_aspect_ratio;
+}
+EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
+
+/*
+ * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
+ * specific block).
+ *
+ * It's almost like cea_mode_alternate_clock(), we just need to add an
+ * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
+ * one.
+ */
+static unsigned int
+hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
+{
+	if (hdmi_mode->hdisplay == 4096 && hdmi_mode->vdisplay == 2160)
+		return hdmi_mode->clock;
+
+	return cea_mode_alternate_clock(hdmi_mode);
+}
+
+static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
+					      unsigned int clock_tolerance)
+{
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
+	u8 vic;
+
+	if (!to_match->clock)
+		return 0;
+
+	for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
+		const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
+		unsigned int clock1, clock2;
+
+		/* Make sure to also match alternate clocks */
+		clock1 = hdmi_mode->clock;
+		clock2 = hdmi_mode_alternate_clock(hdmi_mode);
+
+		if (abs(to_match->clock - clock1) > clock_tolerance &&
+		    abs(to_match->clock - clock2) > clock_tolerance)
+			continue;
+
+		if (drm_mode_match(to_match, hdmi_mode, match_flags))
+			return vic;
+	}
+
+	return 0;
+}
+
+/*
+ * drm_match_hdmi_mode - look for an HDMI mode matching given mode
+ * @to_match: display mode
+ *
+ * An HDMI mode is one defined in the HDMI vendor specific block.
+ *
+ * Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
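The KHZ2PICOS() comparisons in the matchers above (assuming the usual Linux definition, 1000000000UL divided by the kHz value) effectively compare pixel periods in picoseconds, which absorbs tiny kHz rounding differences while still keeping the 60 Hz and 59.94 Hz variants distinct:

#include <stdio.h>

#define KHZ2PICOS(a) (1000000000UL / (a))	/* pixel period in ps */

int main(void)
{
	printf("%lu %lu\n", KHZ2PICOS(148500UL), KHZ2PICOS(148352UL));
	/* 6734 vs 6740: the two 1080p variants never compare equal */
	return 0;
}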
+ */ +static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) +{ + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; + u8 vic; + + if (!to_match->clock) + return 0; + + for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) { + const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic]; + unsigned int clock1, clock2; + + /* Make sure to also match alternate clocks */ + clock1 = hdmi_mode->clock; + clock2 = hdmi_mode_alternate_clock(hdmi_mode); + + if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || + KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && + drm_mode_match(to_match, hdmi_mode, match_flags)) + return vic; + } + return 0; +} + +static bool drm_valid_hdmi_vic(u8 vic) +{ + return vic > 0 && vic < ARRAY_SIZE(edid_4k_modes); +} + +static int +add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode, *tmp; + LIST_HEAD(list); + int modes = 0; + + /* Don't add CEA modes if the CEA extension block is missing */ + if (!drm_find_cea_extension(edid)) + return 0; + + /* + * Go through all probed modes and create a new mode + * with the alternate clock for certain CEA modes. + */ + list_for_each_entry(mode, &connector->probed_modes, head) { + const struct drm_display_mode *cea_mode = NULL; + struct drm_display_mode *newmode; + u8 vic = drm_match_cea_mode(mode); + unsigned int clock1, clock2; + + if (drm_valid_cea_vic(vic)) { + cea_mode = &edid_cea_modes[vic]; + clock2 = cea_mode_alternate_clock(cea_mode); + } else { + vic = drm_match_hdmi_mode(mode); + if (drm_valid_hdmi_vic(vic)) { + cea_mode = &edid_4k_modes[vic]; + clock2 = hdmi_mode_alternate_clock(cea_mode); + } + } + + if (!cea_mode) + continue; + + clock1 = cea_mode->clock; + + if (clock1 == clock2) + continue; + + if (mode->clock != clock1 && mode->clock != clock2) + continue; + + newmode = drm_mode_duplicate(dev, cea_mode); + if (!newmode) + continue; + + /* Carry over the stereo flags */ + newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK; + + /* + * The current mode could be either variant. Make + * sure to pick the "other" clock for the new mode. 
+ */ + if (mode->clock != clock1) + newmode->clock = clock1; + else + newmode->clock = clock2; + + list_add_tail(&newmode->head, &list); + } + + list_for_each_entry_safe(mode, tmp, &list, head) { + list_del(&mode->head); + drm_mode_probed_add(connector, mode); + modes++; + } + + return modes; +} + +static u8 svd_to_vic(u8 svd) +{ + /* 0-6 bit vic, 7th bit native mode indicator */ + if ((svd >= 1 && svd <= 64) || (svd >= 129 && svd <= 192)) + return svd & 127; + + return svd; +} + +static struct drm_display_mode * +drm_display_mode_from_vic_index(struct drm_connector *connector, + const u8 *video_db, u8 video_len, + u8 video_index) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *newmode; + u8 vic; + + if (video_db == NULL || video_index >= video_len) + return NULL; + + /* CEA modes are numbered 1..127 */ + vic = svd_to_vic(video_db[video_index]); + if (!drm_valid_cea_vic(vic)) + return NULL; + + newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]); + if (!newmode) + return NULL; + + newmode->vrefresh = 0; + + return newmode; +} + +/* + * do_y420vdb_modes - Parse YCBCR 420 only modes + * @connector: connector corresponding to the HDMI sink + * @svds: start of the data block of CEA YCBCR 420 VDB + * @len: length of the CEA YCBCR 420 VDB + * + * Parse the CEA-861-F YCBCR 420 Video Data Block (Y420VDB) + * which contains modes which can be supported in YCBCR 420 + * output format only. + */ +static int do_y420vdb_modes(struct drm_connector *connector, + const u8 *svds, u8 svds_len) +{ + int modes = 0, i; + struct drm_device *dev = connector->dev; + struct drm_display_info *info = &connector->display_info; + struct drm_hdmi_info *hdmi = &info->hdmi; + + for (i = 0; i < svds_len; i++) { + u8 vic = svd_to_vic(svds[i]); + struct drm_display_mode *newmode; + + if (!drm_valid_cea_vic(vic)) + continue; + + newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]); + if (!newmode) + break; + bitmap_set(hdmi->y420_vdb_modes, vic, 1); + drm_mode_probed_add(connector, newmode); + modes++; + } + + if (modes > 0) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB420; + return modes; +} + +/* + * drm_add_cmdb_modes - Add a YCBCR 420 mode into bitmap + * @connector: connector corresponding to the HDMI sink + * @vic: CEA vic for the video mode to be added in the map + * + * Makes an entry for a videomode in the YCBCR 420 bitmap + */ +static void +drm_add_cmdb_modes(struct drm_connector *connector, u8 svd) +{ + u8 vic = svd_to_vic(svd); + struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; + + if (!drm_valid_cea_vic(vic)) + return; + + bitmap_set(hdmi->y420_cmdb_modes, vic, 1); +} + +static int +do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len) +{ + int i, modes = 0; + struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; + + for (i = 0; i < len; i++) { + struct drm_display_mode *mode; + mode = drm_display_mode_from_vic_index(connector, db, len, i); + if (mode) { + /* + * YCBCR420 capability block contains a bitmap which + * gives the index of CEA modes from CEA VDB, which + * can support YCBCR 420 sampling output also (apart + * from RGB/YCBCR444 etc). + * For example, if the bit 0 in bitmap is set, + * first mode in VDB can support YCBCR420 output too. + * Add YCBCR420 modes only if sink is HDMI 2.0 capable. 
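To illustrate svd_to_vic() above: SVD codes 129..192 are VICs 1..64 with the "native" bit (bit 7) set, so 0x90 (decimal 144) denotes native VIC 16 (1080p60). A standalone check:

#include <assert.h>

static unsigned char svd_to_vic(unsigned char svd)
{
	/* 0-6 bit vic, 7th bit native mode indicator */
	if ((svd >= 1 && svd <= 64) || (svd >= 129 && svd <= 192))
		return svd & 127;
	return svd;
}

int main(void)
{
	assert(svd_to_vic(0x90) == 16);	/* native 1080p60 */
	assert(svd_to_vic(16) == 16);	/* plain SVD passes through */
	return 0;
}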
+ */ + if (i < 64 && hdmi->y420_cmdb_map & (1ULL << i)) + drm_add_cmdb_modes(connector, db[i]); + + drm_mode_probed_add(connector, mode); + modes++; + } + } + + return modes; +} + +struct stereo_mandatory_mode { + int width, height, vrefresh; + unsigned int flags; +}; + +static const struct stereo_mandatory_mode stereo_mandatory_modes[] = { + { 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM }, + { 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING }, + { 1920, 1080, 50, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF }, + { 1920, 1080, 60, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF }, + { 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM }, + { 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING }, + { 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM }, + { 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING } +}; + +static bool +stereo_match_mandatory(const struct drm_display_mode *mode, + const struct stereo_mandatory_mode *stereo_mode) +{ + unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + + return mode->hdisplay == stereo_mode->width && + mode->vdisplay == stereo_mode->height && + interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) && + drm_mode_vrefresh(mode) == stereo_mode->vrefresh; +} + +static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + const struct drm_display_mode *mode; + struct list_head stereo_modes; + int modes = 0, i; + + INIT_LIST_HEAD(&stereo_modes); + + list_for_each_entry(mode, &connector->probed_modes, head) { + for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) { + const struct stereo_mandatory_mode *mandatory; + struct drm_display_mode *new_mode; + + if (!stereo_match_mandatory(mode, + &stereo_mandatory_modes[i])) + continue; + + mandatory = &stereo_mandatory_modes[i]; + new_mode = drm_mode_duplicate(dev, mode); + if (!new_mode) + continue; + + new_mode->flags |= mandatory->flags; + list_add_tail(&new_mode->head, &stereo_modes); + modes++; + } + } + + list_splice_tail(&stereo_modes, &connector->probed_modes); + + return modes; +} + +static int add_hdmi_mode(struct drm_connector *connector, u8 vic) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *newmode; + + if (!drm_valid_hdmi_vic(vic)) { + DRM_ERROR("Unknown HDMI VIC: %d\n", vic); + return 0; + } + + newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]); + if (!newmode) + return 0; + + drm_mode_probed_add(connector, newmode); + + return 1; +} + +static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, + const u8 *video_db, u8 video_len, u8 video_index) +{ + struct drm_display_mode *newmode; + int modes = 0; + + if (structure & (1 << 0)) { + newmode = drm_display_mode_from_vic_index(connector, video_db, + video_len, + video_index); + if (newmode) { + newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING; + drm_mode_probed_add(connector, newmode); + modes++; + } + } + if (structure & (1 << 6)) { + newmode = drm_display_mode_from_vic_index(connector, video_db, + video_len, + video_index); + if (newmode) { + newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM; + drm_mode_probed_add(connector, newmode); + modes++; + } + } + if (structure & (1 << 8)) { + newmode = drm_display_mode_from_vic_index(connector, video_db, + video_len, + video_index); + if (newmode) { + newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; + drm_mode_probed_add(connector, newmode); + modes++; + } + } + + return modes; +} + +/* + * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data 
block + * @connector: connector corresponding to the HDMI sink + * @db: start of the CEA vendor specific block + * @len: length of the CEA block payload, ie. one can access up to db[len] + * + * Parses the HDMI VSDB looking for modes to add to @connector. This function + * also adds the stereo 3d modes when applicable. + */ +static int +do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len, + const u8 *video_db, u8 video_len) +{ + struct drm_display_info *info = &connector->display_info; + int modes = 0, offset = 0, i, multi_present = 0, multi_len; + u8 vic_len, hdmi_3d_len = 0; + u16 mask; + u16 structure_all; + + if (len < 8) + goto out; + + /* no HDMI_Video_Present */ + if (!(db[8] & (1 << 5))) + goto out; + + /* Latency_Fields_Present */ + if (db[8] & (1 << 7)) + offset += 2; + + /* I_Latency_Fields_Present */ + if (db[8] & (1 << 6)) + offset += 2; + + /* the declared length is not long enough for the 2 first bytes + * of additional video format capabilities */ + if (len < (8 + offset + 2)) + goto out; + + /* 3D_Present */ + offset++; + if (db[8 + offset] & (1 << 7)) { + modes += add_hdmi_mandatory_stereo_modes(connector); + + /* 3D_Multi_present */ + multi_present = (db[8 + offset] & 0x60) >> 5; + } + + offset++; + vic_len = db[8 + offset] >> 5; + hdmi_3d_len = db[8 + offset] & 0x1f; + + for (i = 0; i < vic_len && len >= (9 + offset + i); i++) { + u8 vic; + + vic = db[9 + offset + i]; + modes += add_hdmi_mode(connector, vic); + } + offset += 1 + vic_len; + + if (multi_present == 1) + multi_len = 2; + else if (multi_present == 2) + multi_len = 4; + else + multi_len = 0; + + if (len < (8 + offset + hdmi_3d_len - 1)) + goto out; + + if (hdmi_3d_len < multi_len) + goto out; + + if (multi_present == 1 || multi_present == 2) { + /* 3D_Structure_ALL */ + structure_all = (db[8 + offset] << 8) | db[9 + offset]; + + /* check if 3D_MASK is present */ + if (multi_present == 2) + mask = (db[10 + offset] << 8) | db[11 + offset]; + else + mask = 0xffff; + + for (i = 0; i < 16; i++) { + if (mask & (1 << i)) + modes += add_3d_struct_modes(connector, + structure_all, + video_db, + video_len, i); + } + } + + offset += multi_len; + + for (i = 0; i < (hdmi_3d_len - multi_len); i++) { + int vic_index; + struct drm_display_mode *newmode = NULL; + unsigned int newflag = 0; + bool detail_present; + + detail_present = ((db[8 + offset + i] & 0x0f) > 7); + + if (detail_present && (i + 1 == hdmi_3d_len - multi_len)) + break; + + /* 2D_VIC_order_X */ + vic_index = db[8 + offset + i] >> 4; + + /* 3D_Structure_X */ + switch (db[8 + offset + i] & 0x0f) { + case 0: + newflag = DRM_MODE_FLAG_3D_FRAME_PACKING; + break; + case 6: + newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM; + break; + case 8: + /* 3D_Detail_X */ + if ((db[9 + offset + i] >> 4) == 1) + newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; + break; + } + + if (newflag != 0) { + newmode = drm_display_mode_from_vic_index(connector, + video_db, + video_len, + vic_index); + + if (newmode) { + newmode->flags |= newflag; + drm_mode_probed_add(connector, newmode); + modes++; + } + } + + if (detail_present) + i++; + } + +out: + if (modes > 0) + info->has_hdmi_infoframe = true; + return modes; +} + +static int +cea_db_payload_len(const u8 *db) +{ + return db[0] & 0x1f; +} + +static int +cea_db_extended_tag(const u8 *db) +{ + return db[1]; +} + +static int +cea_db_tag(const u8 *db) +{ + return db[0] >> 5; +} + +static int +cea_revision(const u8 *cea) +{ + return cea[1]; +} + +static int +cea_db_offsets(const u8 *cea, int *start, int *end) +{ + /* 
DisplayID CTA extension blocks and top-level CEA EDID + * block header definitions differ in the following bytes: + * 1) Byte 2 of the header specifies length differently, + * 2) Byte 3 is only present in the CEA top level block. + * + * The different definitions for byte 2 follow. + * + * DisplayID CTA extension block defines byte 2 as: + * Number of payload bytes + * + * CEA EDID block defines byte 2 as: + * Byte number (decimal) within this block where the 18-byte + * DTDs begin. If no non-DTD data is present in this extension + * block, the value should be set to 04h (the byte after next). + * If set to 00h, there are no DTDs present in this block and + * no non-DTD data. + */ + if (cea[0] == DATA_BLOCK_CTA) { + *start = 3; + *end = *start + cea[2]; + } else if (cea[0] == CEA_EXT) { + /* Data block offset in CEA extension block */ + *start = 4; + *end = cea[2]; + if (*end == 0) + *end = 127; + if (*end < 4 || *end > 127) + return -ERANGE; + } else { + return -ENOTSUPP; + } + + return 0; +} + +static bool cea_db_is_hdmi_vsdb(const u8 *db) +{ + int hdmi_id; + + if (cea_db_tag(db) != VENDOR_BLOCK) + return false; + + if (cea_db_payload_len(db) < 5) + return false; + + hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); + + return hdmi_id == HDMI_IEEE_OUI; +} + +static bool cea_db_is_hdmi_forum_vsdb(const u8 *db) +{ + unsigned int oui; + + if (cea_db_tag(db) != VENDOR_BLOCK) + return false; + + if (cea_db_payload_len(db) < 7) + return false; + + oui = db[3] << 16 | db[2] << 8 | db[1]; + + return oui == HDMI_FORUM_IEEE_OUI; +} + +static bool cea_db_is_vcdb(const u8 *db) +{ + if (cea_db_tag(db) != USE_EXTENDED_TAG) + return false; + + if (cea_db_payload_len(db) != 2) + return false; + + if (cea_db_extended_tag(db) != EXT_VIDEO_CAPABILITY_BLOCK) + return false; + + return true; +} + +static bool cea_db_is_y420cmdb(const u8 *db) +{ + if (cea_db_tag(db) != USE_EXTENDED_TAG) + return false; + + if (!cea_db_payload_len(db)) + return false; + + if (cea_db_extended_tag(db) != EXT_VIDEO_CAP_BLOCK_Y420CMDB) + return false; + + return true; +} + +static bool cea_db_is_y420vdb(const u8 *db) +{ + if (cea_db_tag(db) != USE_EXTENDED_TAG) + return false; + + if (!cea_db_payload_len(db)) + return false; + + if (cea_db_extended_tag(db) != EXT_VIDEO_DATA_BLOCK_420) + return false; + + return true; +} + +#define for_each_cea_db(cea, i, start, end) \ + for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) + +static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector, + const u8 *db) +{ + struct drm_display_info *info = &connector->display_info; + struct drm_hdmi_info *hdmi = &info->hdmi; + u8 map_len = cea_db_payload_len(db) - 1; + u8 count; + u64 map = 0; + + if (map_len == 0) { + /* All CEA modes support ycbcr420 sampling also.*/ + hdmi->y420_cmdb_map = U64_MAX; + info->color_formats |= DRM_COLOR_FORMAT_YCRCB420; + return; + } + + /* + * This map indicates which of the existing CEA block modes + * from VDB can support YCBCR420 output too. So if bit=0 is + * set, first mode from VDB can support YCBCR420 output too. + * We will parse and keep this map, before parsing VDB itself + * to avoid going through the same block again and again. + * + * Spec is not clear about max possible size of this block. + * Clamping max bitmap block size at 8 bytes. Every byte can + * address 8 CEA modes, in this way this map can address + * 8*8 = first 64 SVDs. 
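The data-block walk above relies on every CEA data block starting with a header byte: tag in bits 7-5, payload length in bits 4-0. A standalone sketch of the same bounds-checked iteration over a raw CEA block (the block contents are a hypothetical example, not taken from real hardware):

#include <stdio.h>

int main(void)
{
	/* Hypothetical CEA extension fragment: one Video DB with 2 SVDs. */
	unsigned char cea[7] = { 0x02, 0x03, 0x07, 0x00,
				 0x42, 0x10, 0x04 };
	int i, start = 4, end = cea[2];		/* cea[2]: DTD offset */

	for (i = start; i < end && i + (cea[i] & 0x1f) < end;
	     i += (cea[i] & 0x1f) + 1)
		printf("tag %d, payload %d bytes\n",
		       cea[i] >> 5, cea[i] & 0x1f);	/* tag 2, 2 bytes */
	return 0;
}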
+ */ + if (WARN_ON_ONCE(map_len > 8)) + map_len = 8; + + for (count = 0; count < map_len; count++) + map |= (u64)db[2 + count] << (8 * count); + + if (map) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB420; + + hdmi->y420_cmdb_map = map; +} + +static int +add_cea_modes(struct drm_connector *connector, struct edid *edid) +{ + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db, *hdmi = NULL, *video = NULL; + u8 dbl, hdmi_len, video_len = 0; + int modes = 0; + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return 0; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + dbl = cea_db_payload_len(db); + + if (cea_db_tag(db) == VIDEO_BLOCK) { + video = db + 1; + video_len = dbl; + modes += do_cea_modes(connector, video, dbl); + } else if (cea_db_is_hdmi_vsdb(db)) { + hdmi = db; + hdmi_len = dbl; + } else if (cea_db_is_y420vdb(db)) { + const u8 *vdb420 = &db[2]; + + /* Add 4:2:0(only) modes present in EDID */ + modes += do_y420vdb_modes(connector, + vdb420, + dbl - 1); + } + } + } + + /* + * We parse the HDMI VSDB after having added the cea modes as we will + * be patching their flags when the sink supports stereo 3D. + */ + if (hdmi) + modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video, + video_len); + + return modes; +} + +static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode) +{ + const struct drm_display_mode *cea_mode; + int clock1, clock2, clock; + u8 vic; + const char *type; + + /* + * allow 5kHz clock difference either way to account for + * the 10kHz clock resolution limit of detailed timings. + */ + vic = drm_match_cea_mode_clock_tolerance(mode, 5); + if (drm_valid_cea_vic(vic)) { + type = "CEA"; + cea_mode = &edid_cea_modes[vic]; + clock1 = cea_mode->clock; + clock2 = cea_mode_alternate_clock(cea_mode); + } else { + vic = drm_match_hdmi_mode_clock_tolerance(mode, 5); + if (drm_valid_hdmi_vic(vic)) { + type = "HDMI"; + cea_mode = &edid_4k_modes[vic]; + clock1 = cea_mode->clock; + clock2 = hdmi_mode_alternate_clock(cea_mode); + } else { + return; + } + } + + /* pick whichever is closest */ + if (abs(mode->clock - clock1) < abs(mode->clock - clock2)) + clock = clock1; + else + clock = clock2; + + if (mode->clock == clock) + return; + + DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n", + type, vic, mode->clock, clock); + mode->clock = clock; +} + +static bool cea_db_is_hdmi_hdr_metadata_block(const u8 *db) +{ + if (cea_db_tag(db) != USE_EXTENDED_TAG) + return false; + + if (db[1] != HDR_STATIC_METADATA_BLOCK) + return false; + + if (cea_db_payload_len(db) < 3) + return false; + + return true; +} + +static uint8_t eotf_supported(const u8 *edid_ext) +{ + return edid_ext[2] & + (BIT(HDMI_EOTF_TRADITIONAL_GAMMA_SDR) | + BIT(HDMI_EOTF_TRADITIONAL_GAMMA_HDR) | + BIT(HDMI_EOTF_SMPTE_ST2084) | + BIT(HDMI_EOTF_BT_2100_HLG)); +} + +static uint8_t hdr_metadata_type(const u8 *edid_ext) +{ + return edid_ext[3] & + BIT(HDMI_STATIC_METADATA_TYPE1); +} + +static void +drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db) +{ + u16 len; + + len = cea_db_payload_len(db); + + connector->hdr_sink_metadata.hdmi_type1.eotf = + eotf_supported(db); + connector->hdr_sink_metadata.hdmi_type1.metadata_type = + hdr_metadata_type(db); + + if (len >= 4) + connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4]; + if (len >= 5) + connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5]; + if (len >= 6) + connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6]; +} + +static 
void +drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db) +{ + u8 len = cea_db_payload_len(db); + + if (len >= 6 && (db[6] & (1 << 7))) + connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_SUPPORTS_AI; + if (len >= 8) { + connector->latency_present[0] = db[8] >> 7; + connector->latency_present[1] = (db[8] >> 6) & 1; + } + if (len >= 9) + connector->video_latency[0] = db[9]; + if (len >= 10) + connector->audio_latency[0] = db[10]; + if (len >= 11) + connector->video_latency[1] = db[11]; + if (len >= 12) + connector->audio_latency[1] = db[12]; + + DRM_DEBUG_KMS("HDMI: latency present %d %d, " + "video latency %d %d, " + "audio latency %d %d\n", + connector->latency_present[0], + connector->latency_present[1], + connector->video_latency[0], + connector->video_latency[1], + connector->audio_latency[0], + connector->audio_latency[1]); +} + +static void +monitor_name(struct detailed_timing *t, void *data) +{ + if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME) + *(u8 **)data = t->data.other_data.data.str.str; +} + +static int get_monitor_name(struct edid *edid, char name[13]) +{ + char *edid_name = NULL; + int mnl; + + if (!edid || !name) + return 0; + + drm_for_each_detailed_block((u8 *)edid, monitor_name, &edid_name); + for (mnl = 0; edid_name && mnl < 13; mnl++) { + if (edid_name[mnl] == 0x0a) + break; + + name[mnl] = edid_name[mnl]; + } + + return mnl; +} + +/** + * drm_edid_get_monitor_name - fetch the monitor name from the edid + * @edid: monitor EDID information + * @name: pointer to a character array to hold the name of the monitor + * @bufsize: The size of the name buffer (should be at least 14 chars.) + * + */ +void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize) +{ + int name_length; + char buf[13]; + + if (bufsize <= 0) + return; + + name_length = min(get_monitor_name(edid, buf), bufsize - 1); + memcpy(name, buf, name_length); + name[name_length] = '\0'; +} +EXPORT_SYMBOL(drm_edid_get_monitor_name); + +static void clear_eld(struct drm_connector *connector) +{ + memset(connector->eld, 0, sizeof(connector->eld)); + + connector->latency_present[0] = false; + connector->latency_present[1] = false; + connector->video_latency[0] = 0; + connector->audio_latency[0] = 0; + connector->video_latency[1] = 0; + connector->audio_latency[1] = 0; +} + +/* + * drm_edid_to_eld - build ELD from EDID + * @connector: connector corresponding to the HDMI/DP sink + * @edid: EDID to parse + * + * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The + * HDCP and Port_ID ELD fields are left for the graphics driver to fill in. 
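For context on get_monitor_name() above: the 13-byte descriptor payload is padded with a terminating 0x0a and spaces, so extraction just copies up to the first line feed. A standalone sketch with a hypothetical payload:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 13-byte monitor name payload, 0x0a-terminated. */
	const unsigned char str[13] = "PANEL XY\x0a   ";
	char name[14];
	int n;

	for (n = 0; n < 13 && str[n] != 0x0a; n++)
		name[n] = str[n];
	name[n] = '\0';
	printf("\"%s\"\n", name);	/* "PANEL XY" */
	return 0;
}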
+ */ +static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) +{ + uint8_t *eld = connector->eld; + u8 *cea; + u8 *db; + int total_sad_count = 0; + int mnl; + int dbl; + + clear_eld(connector); + + if (!edid) + return; + + cea = drm_find_cea_extension(edid); + if (!cea) { + DRM_DEBUG_KMS("ELD: no CEA Extension found\n"); + return; + } + + mnl = get_monitor_name(edid, &eld[DRM_ELD_MONITOR_NAME_STRING]); + DRM_DEBUG_KMS("ELD monitor %s\n", &eld[DRM_ELD_MONITOR_NAME_STRING]); + + eld[DRM_ELD_CEA_EDID_VER_MNL] = cea[1] << DRM_ELD_CEA_EDID_VER_SHIFT; + eld[DRM_ELD_CEA_EDID_VER_MNL] |= mnl; + + eld[DRM_ELD_VER] = DRM_ELD_VER_CEA861D; + + eld[DRM_ELD_MANUFACTURER_NAME0] = edid->mfg_id[0]; + eld[DRM_ELD_MANUFACTURER_NAME1] = edid->mfg_id[1]; + eld[DRM_ELD_PRODUCT_CODE0] = edid->prod_code[0]; + eld[DRM_ELD_PRODUCT_CODE1] = edid->prod_code[1]; + + if (cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) { + start = 0; + end = 0; + } + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + dbl = cea_db_payload_len(db); + + switch (cea_db_tag(db)) { + int sad_count; + + case AUDIO_BLOCK: + /* Audio Data Block, contains SADs */ + sad_count = min(dbl / 3, 15 - total_sad_count); + if (sad_count >= 1) + memcpy(&eld[DRM_ELD_CEA_SAD(mnl, total_sad_count)], + &db[1], sad_count * 3); + total_sad_count += sad_count; + break; + case SPEAKER_BLOCK: + /* Speaker Allocation Data Block */ + if (dbl >= 1) + eld[DRM_ELD_SPEAKER] = db[1]; + break; + case VENDOR_BLOCK: + /* HDMI Vendor-Specific Data Block */ + if (cea_db_is_hdmi_vsdb(db)) + drm_parse_hdmi_vsdb_audio(connector, db); + break; + default: + break; + } + } + } + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= total_sad_count << DRM_ELD_SAD_COUNT_SHIFT; + + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP; + else + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI; + + eld[DRM_ELD_BASELINE_ELD_LEN] = + DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4); + + DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", + drm_eld_size(eld), total_sad_count); +} + +/** + * drm_edid_to_sad - extracts SADs from EDID + * @edid: EDID to parse + * @sads: pointer that will be set to the extracted SADs + * + * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it. + * + * Note: The returned pointer needs to be freed using kfree(). + * + * Return: The number of found SADs or negative number on error. 
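Each SAD handled here is three bytes: audio format in bits 6-3 of byte 0, channel count minus one in bits 2-0, a sample-rate bitmask in byte 1, and a format-dependent byte 2 (bit depths for LPCM). A standalone decode of a typical LPCM SAD (byte values are a made-up example):

#include <stdio.h>

int main(void)
{
	/* Hypothetical SAD: LPCM, 2 channels, 32/44.1/48 kHz, 16/20/24 bit */
	unsigned char sad[3] = { 0x09, 0x07, 0x07 };

	printf("format %d\n", (sad[0] & 0x78) >> 3);	/* 1 = LPCM */
	printf("channels %d\n", (sad[0] & 0x07) + 1);	/* 2 */
	printf("freq mask 0x%02x\n", sad[1] & 0x7f);	/* 32/44.1/48 kHz */
	printf("byte2 0x%02x\n", sad[2]);		/* LPCM bit depths */
	return 0;
}

Note that drm_edid_to_sad() below stores the raw count-minus-one value in cea_sad.channels; the +1 above is only for display.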
+ */ +int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads) +{ + int count = 0; + int i, start, end, dbl; + u8 *cea; + + cea = drm_find_cea_extension(edid); + if (!cea) { + DRM_DEBUG_KMS("SAD: no CEA Extension found\n"); + return -ENOENT; + } + + if (cea_revision(cea) < 3) { + DRM_DEBUG_KMS("SAD: wrong CEA revision\n"); + return -ENOTSUPP; + } + + if (cea_db_offsets(cea, &start, &end)) { + DRM_DEBUG_KMS("SAD: invalid data block offsets\n"); + return -EPROTO; + } + + for_each_cea_db(cea, i, start, end) { + u8 *db = &cea[i]; + + if (cea_db_tag(db) == AUDIO_BLOCK) { + int j; + dbl = cea_db_payload_len(db); + + count = dbl / 3; /* SAD is 3B */ + *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL); + if (!*sads) + return -ENOMEM; + for (j = 0; j < count; j++) { + u8 *sad = &db[1 + j * 3]; + + (*sads)[j].format = (sad[0] & 0x78) >> 3; + (*sads)[j].channels = sad[0] & 0x7; + (*sads)[j].freq = sad[1] & 0x7F; + (*sads)[j].byte2 = sad[2]; + } + break; + } + } + + return count; +} +EXPORT_SYMBOL(drm_edid_to_sad); + +/** + * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID + * @edid: EDID to parse + * @sadb: pointer to the speaker block + * + * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it. + * + * Note: The returned pointer needs to be freed using kfree(). + * + * Return: The number of found Speaker Allocation Blocks or negative number on + * error. + */ +int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb) +{ + int count = 0; + int i, start, end, dbl; + const u8 *cea; + + cea = drm_find_cea_extension(edid); + if (!cea) { + DRM_DEBUG_KMS("SAD: no CEA Extension found\n"); + return -ENOENT; + } + + if (cea_revision(cea) < 3) { + DRM_DEBUG_KMS("SAD: wrong CEA revision\n"); + return -ENOTSUPP; + } + + if (cea_db_offsets(cea, &start, &end)) { + DRM_DEBUG_KMS("SAD: invalid data block offsets\n"); + return -EPROTO; + } + + for_each_cea_db(cea, i, start, end) { + const u8 *db = &cea[i]; + + if (cea_db_tag(db) == SPEAKER_BLOCK) { + dbl = cea_db_payload_len(db); + + /* Speaker Allocation Data Block */ + if (dbl == 3) { + *sadb = kmemdup(&db[1], dbl, GFP_KERNEL); + if (!*sadb) + return -ENOMEM; + count = dbl; + break; + } + } + } + + return count; +} +EXPORT_SYMBOL(drm_edid_to_speaker_allocation); + +/** + * drm_av_sync_delay - compute the HDMI/DP sink audio-video sync delay + * @connector: connector associated with the HDMI/DP sink + * @mode: the display mode + * + * Return: The HDMI/DP sink's audio-video sync delay in milliseconds or 0 if + * the sink doesn't support audio or video. + */ +int drm_av_sync_delay(struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); + int a, v; + + if (!connector->latency_present[0]) + return 0; + if (!connector->latency_present[1]) + i = 0; + + a = connector->audio_latency[i]; + v = connector->video_latency[i]; + + /* + * HDMI/DP sink doesn't support audio or video? + */ + if (a == 255 || v == 255) + return 0; + + /* + * Convert raw EDID values to millisecond. + * Treat unknown latency as 0ms. + */ + if (a) + a = min(2 * (a - 1), 500); + if (v) + v = min(2 * (v - 1), 500); + + return max(v - a, 0); +} +EXPORT_SYMBOL(drm_av_sync_delay); + +/** + * drm_detect_hdmi_monitor - detect whether monitor is HDMI + * @edid: monitor EDID information + * + * Parse the CEA extension according to CEA-861-B. + * + * Return: True if the monitor is HDMI, false if not or unknown. 
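The latency conversion in drm_av_sync_delay() above follows the HDMI VSDB encoding: 0 means unknown, 255 means audio/video unsupported, and any other raw value v encodes (v - 1) * 2 ms, capped at 500 ms here. A standalone sketch of the same mapping:

#include <stdio.h>

/* Raw HDMI VSDB latency byte -> milliseconds (0 = unknown). */
static int latency_ms(int raw)
{
	if (raw == 0 || raw == 255)
		return 0;
	return raw - 1 > 250 ? 500 : 2 * (raw - 1);
}

int main(void)
{
	printf("%d\n", latency_ms(11));		/* 20 ms */
	printf("%d\n", latency_ms(251));	/* 500 ms, the cap */
	return 0;
}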
+ */ +bool drm_detect_hdmi_monitor(struct edid *edid) +{ + u8 *edid_ext; + int i; + int start_offset, end_offset; + + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) + return false; + + if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) + return false; + + /* + * Because HDMI identifier is in Vendor Specific Block, + * search it from all data blocks of CEA extension. + */ + for_each_cea_db(edid_ext, i, start_offset, end_offset) { + if (cea_db_is_hdmi_vsdb(&edid_ext[i])) + return true; + } + + return false; +} +EXPORT_SYMBOL(drm_detect_hdmi_monitor); + +/** + * drm_detect_monitor_audio - check monitor audio capability + * @edid: EDID block to scan + * + * Monitor should have CEA extension block. + * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic + * audio' only. If there is any audio extension block and supported + * audio format, assume at least 'basic audio' support, even if 'basic + * audio' is not defined in EDID. + * + * Return: True if the monitor supports audio, false otherwise. + */ +bool drm_detect_monitor_audio(struct edid *edid) +{ + u8 *edid_ext; + int i, j; + bool has_audio = false; + int start_offset, end_offset; + + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) + goto end; + + has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); + + if (has_audio) { + DRM_DEBUG_KMS("Monitor has basic audio support\n"); + goto end; + } + + if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) + goto end; + + for_each_cea_db(edid_ext, i, start_offset, end_offset) { + if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) { + has_audio = true; + for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3) + DRM_DEBUG_KMS("CEA audio format %d\n", + (edid_ext[i + j] >> 3) & 0xf); + goto end; + } + } +end: + return has_audio; +} +EXPORT_SYMBOL(drm_detect_monitor_audio); + + +/** + * drm_default_rgb_quant_range - default RGB quantization range + * @mode: display mode + * + * Determine the default RGB quantization range for the mode, + * as specified in CEA-861. + * + * Return: The default RGB quantization range for the mode + */ +enum hdmi_quantization_range +drm_default_rgb_quant_range(const struct drm_display_mode *mode) +{ + /* All CEA modes other than VIC 1 use limited quantization range. */ + return drm_match_cea_mode(mode) > 1 ? + HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL; +} +EXPORT_SYMBOL(drm_default_rgb_quant_range); + +static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db) +{ + struct drm_display_info *info = &connector->display_info; + + DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", db[2]); + + if (db[2] & EDID_CEA_VCDB_QS) + info->rgb_quant_range_selectable = true; +} + +static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector, + const u8 *db) +{ + u8 dc_mask; + struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; + + dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK; + hdmi->y420_dc_modes = dc_mask; +} + +static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, + const u8 *hf_vsdb) +{ + struct drm_display_info *display = &connector->display_info; + struct drm_hdmi_info *hdmi = &display->hdmi; + + display->has_hdmi_infoframe = true; + + if (hf_vsdb[6] & 0x80) { + hdmi->scdc.supported = true; + if (hf_vsdb[6] & 0x40) + hdmi->scdc.read_request = true; + } + + /* + * All HDMI 2.0 monitors must support scrambling at rates > 340 MHz. 
+ * And as per the spec, three factors confirm this:
+ * * Availability of a HF-VSDB block in EDID (check)
+ * * A non-zero Max_TMDS_Char_Rate field in HF-VSDB (let's check)
+ * * SCDC support available (let's check)
+ */
+
+ if (hf_vsdb[5]) {
+ /* max clock is 5000 kHz times the block value */
+ u32 max_tmds_clock = hf_vsdb[5] * 5000;
+ struct drm_scdc *scdc = &hdmi->scdc;
+
+ if (max_tmds_clock > 340000) {
+ display->max_tmds_clock = max_tmds_clock;
+ DRM_DEBUG_KMS("HF-VSDB: max TMDS clock %d kHz\n",
+ display->max_tmds_clock);
+ }
+
+ if (scdc->supported) {
+ scdc->scrambling.supported = true;
+
+ /* Few sinks support scrambling for clocks < 340 MHz */
+ if ((hf_vsdb[6] & 0x8))
+ scdc->scrambling.low_rates = true;
+ }
+ }
+
+ drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
+}
+
+static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
+ const u8 *hdmi)
+{
+ struct drm_display_info *info = &connector->display_info;
+ unsigned int dc_bpc = 0;
+
+ /* HDMI supports at least 8 bpc */
+ info->bpc = 8;
+
+ if (cea_db_payload_len(hdmi) < 6)
+ return;
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
+ dc_bpc = 10;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
+ DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
+ dc_bpc = 12;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
+ DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
+ dc_bpc = 16;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
+ DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
+ connector->name);
+ }
+
+ if (dc_bpc == 0) {
+ DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
+ connector->name);
+ return;
+ }
+
+ DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
+ connector->name, dc_bpc);
+ info->bpc = dc_bpc;
+
+ /*
+ * Deep color support mandates RGB444 support for all video
+ * modes and forbids YCRCB422 support for all video modes per
+ * HDMI 1.3 spec.
+ */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ /* YCRCB444 is optional according to spec. */
+ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
+ connector->name);
+ }
+
+ /*
+ * Spec says that if any deep color mode is supported at all,
+ * then deep color 36 bit must be supported.
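+ *
+ * For example, a sink that advertises DC_30 or DC_48 without DC_36 is
+ * out of spec; the check below only warns about such sinks rather than
+ * rejecting their advertised deep color modes.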
+ */ + if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) { + DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n", + connector->name); + } +} + +static void +drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db) +{ + struct drm_display_info *info = &connector->display_info; + u8 len = cea_db_payload_len(db); + + if (len >= 6) + info->dvi_dual = db[6] & 1; + if (len >= 7) + info->max_tmds_clock = db[7] * 5000; + + DRM_DEBUG_KMS("HDMI: DVI dual %d, " + "max TMDS clock %d kHz\n", + info->dvi_dual, + info->max_tmds_clock); + + drm_parse_hdmi_deep_color_info(connector, db); +} + +static void drm_parse_cea_ext(struct drm_connector *connector, + const struct edid *edid) +{ + struct drm_display_info *info = &connector->display_info; + const u8 *edid_ext; + int i, start, end; + + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) + return; + + info->cea_rev = edid_ext[1]; + + /* The existence of a CEA block should imply RGB support */ + info->color_formats = DRM_COLOR_FORMAT_RGB444; + if (edid_ext[3] & EDID_CEA_YCRCB444) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + if (edid_ext[3] & EDID_CEA_YCRCB422) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; + + if (cea_db_offsets(edid_ext, &start, &end)) + return; + + for_each_cea_db(edid_ext, i, start, end) { + const u8 *db = &edid_ext[i]; + + if (cea_db_is_hdmi_vsdb(db)) + drm_parse_hdmi_vsdb_video(connector, db); + if (cea_db_is_hdmi_forum_vsdb(db)) + drm_parse_hdmi_forum_vsdb(connector, db); + if (cea_db_is_y420cmdb(db)) + drm_parse_y420cmdb_bitmap(connector, db); + if (cea_db_is_vcdb(db)) + drm_parse_vcdb(connector, db); + if (cea_db_is_hdmi_hdr_metadata_block(db)) + drm_parse_hdr_metadata_block(connector, db); + } +} + +/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset + * all of the values which would have been set from EDID + */ +void +drm_reset_display_info(struct drm_connector *connector) +{ + struct drm_display_info *info = &connector->display_info; + + info->width_mm = 0; + info->height_mm = 0; + + info->bpc = 0; + info->color_formats = 0; + info->cea_rev = 0; + info->max_tmds_clock = 0; + info->dvi_dual = false; + info->has_hdmi_infoframe = false; + info->rgb_quant_range_selectable = false; + memset(&info->hdmi, 0, sizeof(info->hdmi)); + + info->non_desktop = 0; +} + +u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid) +{ + struct drm_display_info *info = &connector->display_info; + + u32 quirks = edid_get_quirks(edid); + + drm_reset_display_info(connector); + + info->width_mm = edid->width_cm * 10; + info->height_mm = edid->height_cm * 10; + + info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); + + DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop); + + if (edid->revision < 3) + return quirks; + + if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) + return quirks; + + drm_parse_cea_ext(connector, edid); + + /* + * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3? + * + * For such displays, the DFP spec 1.0, section 3.10 "EDID support" + * tells us to assume 8 bpc color depth if the EDID doesn't have + * extensions which tell otherwise. 
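+ *
+ * (In other words, an EDID 1.3 digital DFP sink whose extension blocks
+ * don't say otherwise is simply assumed to be a plain 8 bpc sink.)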
+ */ + if (info->bpc == 0 && edid->revision == 3 && + edid->input & DRM_EDID_DIGITAL_DFP_1_X) { + info->bpc = 8; + DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n", + connector->name, info->bpc); + } + + /* Only defined for 1.4 with digital displays */ + if (edid->revision < 4) + return quirks; + + switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { + case DRM_EDID_DIGITAL_DEPTH_6: + info->bpc = 6; + break; + case DRM_EDID_DIGITAL_DEPTH_8: + info->bpc = 8; + break; + case DRM_EDID_DIGITAL_DEPTH_10: + info->bpc = 10; + break; + case DRM_EDID_DIGITAL_DEPTH_12: + info->bpc = 12; + break; + case DRM_EDID_DIGITAL_DEPTH_14: + info->bpc = 14; + break; + case DRM_EDID_DIGITAL_DEPTH_16: + info->bpc = 16; + break; + case DRM_EDID_DIGITAL_DEPTH_UNDEF: + default: + info->bpc = 0; + break; + } + + DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", + connector->name, info->bpc); + + info->color_formats |= DRM_COLOR_FORMAT_RGB444; + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; + return quirks; +} + +static int validate_displayid(u8 *displayid, int length, int idx) +{ + int i; + u8 csum = 0; + struct displayid_hdr *base; + + base = (struct displayid_hdr *)&displayid[idx]; + + DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", + base->rev, base->bytes, base->prod_id, base->ext_count); + + if (base->bytes + 5 > length - idx) + return -EINVAL; + for (i = idx; i <= base->bytes + 5; i++) { + csum += displayid[i]; + } + if (csum) { + DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); + return -EINVAL; + } + return 0; +} + +static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev, + struct displayid_detailed_timings_1 *timings) +{ + struct drm_display_mode *mode; + unsigned pixel_clock = (timings->pixel_clock[0] | + (timings->pixel_clock[1] << 8) | + (timings->pixel_clock[2] << 16)); + unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; + unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1; + unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1; + unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1; + unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1; + unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1; + unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1; + unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1; + bool hsync_positive = (timings->hsync[1] >> 7) & 0x1; + bool vsync_positive = (timings->vsync[1] >> 7) & 0x1; + mode = drm_mode_create(dev); + if (!mode) + return NULL; + + mode->clock = pixel_clock * 10; + mode->hdisplay = hactive; + mode->hsync_start = mode->hdisplay + hsync; + mode->hsync_end = mode->hsync_start + hsync_width; + mode->htotal = mode->hdisplay + hblank; + + mode->vdisplay = vactive; + mode->vsync_start = mode->vdisplay + vsync; + mode->vsync_end = mode->vsync_start + vsync_width; + mode->vtotal = mode->vdisplay + vblank; + + mode->flags = 0; + mode->flags |= hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= vsync_positive ? 
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; + mode->type = DRM_MODE_TYPE_DRIVER; + + if (timings->flags & 0x80) + mode->type |= DRM_MODE_TYPE_PREFERRED; + mode->vrefresh = drm_mode_vrefresh(mode); + drm_mode_set_name(mode); + + return mode; +} + +static int add_displayid_detailed_1_modes(struct drm_connector *connector, + struct displayid_block *block) +{ + struct displayid_detailed_timing_block *det = (struct displayid_detailed_timing_block *)block; + int i; + int num_timings; + struct drm_display_mode *newmode; + int num_modes = 0; + /* blocks must be multiple of 20 bytes length */ + if (block->num_bytes % 20) + return 0; + + num_timings = block->num_bytes / 20; + for (i = 0; i < num_timings; i++) { + struct displayid_detailed_timings_1 *timings = &det->timings[i]; + + newmode = drm_mode_displayid_detailed(connector->dev, timings); + if (!newmode) + continue; + + drm_mode_probed_add(connector, newmode); + num_modes++; + } + return num_modes; +} + +static int add_displayid_detailed_modes(struct drm_connector *connector, + struct edid *edid) +{ + u8 *displayid; + int ret; + int idx = 1; + int length = EDID_LENGTH; + struct displayid_block *block; + int num_modes = 0; + + displayid = drm_find_displayid_extension(edid); + if (!displayid) + return 0; + + ret = validate_displayid(displayid, length, idx); + if (ret) + return 0; + + idx += sizeof(struct displayid_hdr); + for_each_displayid_db(displayid, block, idx, length) { + switch (block->tag) { + case DATA_BLOCK_TYPE_1_DETAILED_TIMING: + num_modes += add_displayid_detailed_1_modes(connector, block); + break; + } + } + return num_modes; +} + +/** + * drm_add_edid_modes - add modes from EDID data, if available + * @connector: connector we're probing + * @edid: EDID data + * + * Add the specified modes to the connector's mode list. Also fills out the + * &drm_display_info structure and ELD in @connector with any information which + * can be derived from the edid. + * + * Return: The number of modes added or 0 if we couldn't find any. + */ +int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) +{ + int num_modes = 0; + u32 quirks; + + if (edid == NULL) { + clear_eld(connector); + return 0; + } + if (!drm_edid_is_valid(edid)) { + clear_eld(connector); + dev_warn(connector->dev->dev, "%s: EDID invalid.\n", + connector->name); + return 0; + } + + drm_edid_to_eld(connector, edid); + + /* + * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks. + * To avoid multiple parsing of same block, lets parse that map + * from sink info, before parsing CEA modes. + */ + quirks = drm_add_display_info(connector, edid); + + /* + * EDID spec says modes should be preferred in this order: + * - preferred detailed mode + * - other detailed modes from base block + * - detailed modes from extension blocks + * - CVT 3-byte code modes + * - standard timing codes + * - established timing codes + * - modes inferred from GTF or CVT range information + * + * We get this pretty much right. + * + * XXX order for additional mode types in extension blocks? 
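+ *
+ * A typical (hypothetical) &drm_connector_helper_funcs.get_modes
+ * implementation built on this function might look like:
+ *
+ *	edid = drm_get_edid(connector, adapter);
+ *	count = drm_add_edid_modes(connector, edid);
+ *	kfree(edid);
+ *	return count;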
+ */
+ num_modes += add_detailed_modes(connector, edid, quirks);
+ num_modes += add_cvt_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
+ num_modes += add_alternate_cea_modes(connector, edid);
+ num_modes += add_displayid_detailed_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+
+ if (quirks & EDID_QUIRK_FORCE_6BPC)
+ connector->display_info.bpc = 6;
+
+ if (quirks & EDID_QUIRK_FORCE_8BPC)
+ connector->display_info.bpc = 8;
+
+ if (quirks & EDID_QUIRK_FORCE_10BPC)
+ connector->display_info.bpc = 10;
+
+ if (quirks & EDID_QUIRK_FORCE_12BPC)
+ connector->display_info.bpc = 12;
+
+ return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. A mode is only added
+ * when its hdisplay/vdisplay does not exceed the given limits.
+ *
+ * Return: The number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay)
+{
+ int i, count, num_modes = 0;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ count = ARRAY_SIZE(drm_dmt_modes);
+ if (hdisplay < 0)
+ hdisplay = 0;
+ if (vdisplay < 0)
+ vdisplay = 0;
+
+ for (i = 0; i < count; i++) {
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hdisplay && vdisplay) {
+ /*
+ * Only when both limits are valid are they used to
+ * decide whether the mode should be added to the
+ * mode list of the connector.
+ */
+ if (ptr->hdisplay > hdisplay ||
+ ptr->vdisplay > vdisplay)
+ continue;
+ }
+ if (drm_mode_vrefresh(ptr) > 61)
+ continue;
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
+EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_set_preferred_mode - Sets the preferred mode of a connector
+ * @connector: connector whose mode list should be processed
+ * @hpref: horizontal resolution of preferred mode
+ * @vpref: vertical resolution of preferred mode
+ *
+ * Marks a mode as preferred if it matches the resolution specified by @hpref
+ * and @vpref.
+ */
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (mode->hdisplay == hpref &&
+ mode->vdisplay == vpref)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
+
+static bool is_hdmi2_sink(struct drm_connector *connector)
+{
+ /*
+ * FIXME: sil-sii8620 doesn't have a connector around when
+ * we need one, so we have to be prepared for a NULL connector.
+ */
+ if (!connector)
+ return true;
+
+ return connector->display_info.hdmi.scdc.supported ||
+ connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
+}
+
+static inline bool is_eotf_supported(u8 output_eotf, u8 sink_eotf)
+{
+ return sink_eotf & BIT(output_eotf);
+}
+
+/**
+ * drm_hdmi_infoframe_set_hdr_metadata() - fill an HDMI DRM infoframe with
+ * HDR metadata from userspace
+ * @frame: HDMI DRM infoframe
+ * @conn_state: Connector state containing HDR metadata
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_connector *connector;
+ struct hdr_output_metadata *hdr_metadata;
+ int err;
+
+ if (!frame || !conn_state)
+ return -EINVAL;
+
+ connector = conn_state->connector;
+
+ if (!conn_state->hdr_output_metadata)
+ return -EINVAL;
+
+ hdr_metadata = conn_state->hdr_output_metadata->data;
+
+ if (!hdr_metadata || !connector)
+ return -EINVAL;
+
+ /* The sink EOTF is a bit map, while the infoframe carries absolute values */
+ if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
+ connector->hdr_sink_metadata.hdmi_type1.eotf)) {
+ DRM_DEBUG_KMS("EOTF Not Supported\n");
+ return -EINVAL;
+ }
+
+ err = hdmi_drm_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ frame->eotf = hdr_metadata->hdmi_metadata_type1.eotf;
+ frame->metadata_type = hdr_metadata->hdmi_metadata_type1.metadata_type;
+
+ BUILD_BUG_ON(sizeof(frame->display_primaries) !=
+ sizeof(hdr_metadata->hdmi_metadata_type1.display_primaries));
+ BUILD_BUG_ON(sizeof(frame->white_point) !=
+ sizeof(hdr_metadata->hdmi_metadata_type1.white_point));
+
+ memcpy(&frame->display_primaries,
+ &hdr_metadata->hdmi_metadata_type1.display_primaries,
+ sizeof(frame->display_primaries));
+
+ memcpy(&frame->white_point,
+ &hdr_metadata->hdmi_metadata_type1.white_point,
+ sizeof(frame->white_point));
+
+ frame->max_display_mastering_luminance =
+ hdr_metadata->hdmi_metadata_type1.max_display_mastering_luminance;
+ frame->min_display_mastering_luminance =
+ hdr_metadata->hdmi_metadata_type1.min_display_mastering_luminance;
+ frame->max_fall = hdr_metadata->hdmi_metadata_type1.max_fall;
+ frame->max_cll = hdr_metadata->hdmi_metadata_type1.max_cll;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
+
+/**
+ * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI AVI infoframe
+ * @connector: the connector
+ * @mode: DRM display mode
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ enum hdmi_picture_aspect picture_aspect;
+ int err;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ err = hdmi_avi_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ frame->pixel_repeat = 1;
+
+ frame->video_code = drm_match_cea_mode(mode);
+
+ /*
+ * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+ * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+ * have to make sure we don't break HDMI 1.4 sinks.
+ */
+ if (!is_hdmi2_sink(connector) && frame->video_code > 64)
+ frame->video_code = 0;
+
+ /*
+ * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ * we should send its VIC in vendor infoframes, else send the
+ * VIC in AVI infoframes. Let's check whether this mode is
+ * present in the HDMI 1.4b 4K modes.
+ */
+ if (frame->video_code) {
+ u8 vendor_if_vic = drm_match_hdmi_mode(mode);
+ bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d)
+ frame->video_code = 0;
+ }
+
+ frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+
+ /*
+ * Since some drivers don't support atomic, we can't use the connector
+ * state. So just initialize the frame with default values, the same
+ * way it's done with other properties here.
+ */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ frame->itc = 0;
+
+ /*
+ * Populate picture aspect ratio from either
+ * user input (if specified) or from the CEA mode list.
+ */
+ picture_aspect = mode->picture_aspect_ratio;
+ if (picture_aspect == HDMI_PICTURE_ASPECT_NONE)
+ picture_aspect = drm_get_cea_aspect_ratio(frame->video_code);
+
+ /*
+ * The infoframe can't convey anything but none, 4:3
+ * and 16:9, so if the user has asked for anything else
+ * we can only satisfy it by specifying the right VIC.
+ */
+ if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) {
+ if (picture_aspect !=
+ drm_get_cea_aspect_ratio(frame->video_code))
+ return -EINVAL;
+ picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+ }
+
+ frame->picture_aspect = picture_aspect;
+ frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+ frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+
+/* HDMI Colorspace Spec Definitions */
+#define FULL_COLORIMETRY_MASK 0x1FF
+#define NORMAL_COLORIMETRY_MASK 0x3
+#define EXTENDED_COLORIMETRY_MASK 0x7
+#define EXTENDED_ACE_COLORIMETRY_MASK 0xF
+
+#define C(x) ((x) << 0)
+#define EC(x) ((x) << 2)
+#define ACE(x) ((x) << 5)
+
+#define HDMI_COLORIMETRY_NO_DATA 0x0
+#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0))
+#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0))
+#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0))
+#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0))
+#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0))
+#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1))
+
+static const u32 hdmi_colorimetry_val[] = {
+ [DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA,
+ [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC,
+ [DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC,
+ [DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601,
+ [DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709,
+ [DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601,
+ [DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601,
+ [DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB,
+ [DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC,
+ [DRM_MODE_COLORIMETRY_BT2020_RGB] = HDMI_COLORIMETRY_BT2020_RGB,
+ [DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC,
+};
+
+#undef C
+#undef EC
+#undef ACE
+
+/**
+ * drm_hdmi_avi_infoframe_colorspace() - fill the HDMI AVI infoframe
+ * colorspace information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
+ */
+void
+drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ u32 colorimetry_val;
+ u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK;
+
+ if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val))
+ colorimetry_val = HDMI_COLORIMETRY_NO_DATA;
+ else
+ colorimetry_val = hdmi_colorimetry_val[colorimetry_index];
+
+ frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK;
+ /*
+ * TODO: Extend it for ACE formats as well. Modify the infoframe
+ * structure and extend it in drivers/video/hdmi.
+ */
+ frame->extended_colorimetry = (colorimetry_val >> 2) &
+ EXTENDED_COLORIMETRY_MASK;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace);
+
+/**
+ * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
+ * quantization range information
+ * @frame: HDMI AVI infoframe
+ * @connector: the connector
+ * @mode: DRM display mode
+ * @rgb_quant_range: RGB quantization range (Q)
+ */
+void
+drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ enum hdmi_quantization_range rgb_quant_range)
+{
+ const struct drm_display_info *info = &connector->display_info;
+
+ /*
+ * CEA-861:
+ * "A Source shall not send a non-zero Q value that does not correspond
+ * to the default RGB Quantization Range for the transmitted Picture
+ * unless the Sink indicates support for the Q bit in a Video
+ * Capabilities Data Block."
+ *
+ * HDMI 2.0 recommends sending non-zero Q when it does match the
+ * default RGB quantization range for the mode, even when QS=0.
+ */
+ if (info->rgb_quant_range_selectable ||
+ rgb_quant_range == drm_default_rgb_quant_range(mode))
+ frame->quantization_range = rgb_quant_range;
+ else
+ frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+
+ /*
+ * CEA-861-F:
+ * "When transmitting any RGB colorimetry, the Source should set the
+ * YQ-field to match the RGB Quantization Range being transmitted
+ * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
+ * set YQ=1) and the Sink shall ignore the YQ-field."
+ *
+ * Unfortunately certain sinks (e.g. VIZ Model 67/E261VA) get confused
+ * by non-zero YQ when receiving RGB. There doesn't seem to be any
+ * good way to tell which version of CEA-861 the sink supports, so
+ * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
+ * on CEA-861-F.
+ */
+ if (!is_hdmi2_sink(connector) ||
+ rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+ frame->ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ else
+ frame->ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_FULL;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
+
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+ u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ switch (layout) {
+ case DRM_MODE_FLAG_3D_FRAME_PACKING:
+ return HDMI_3D_STRUCTURE_FRAME_PACKING;
+ case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+ return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+ case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+ return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+ case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+ case DRM_MODE_FLAG_3D_L_DEPTH:
+ return HDMI_3D_STRUCTURE_L_DEPTH;
+ case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+ return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+ case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+ return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+ case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+ default:
+ return HDMI_3D_STRUCTURE_INVALID;
+ }
+}
+
+/**
+ * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI vendor infoframe
+ * @connector: the connector
+ * @mode: DRM display mode
+ *
+ * Note that HDMI vendor infoframes only need to be sent when using a 4k or
+ * stereoscopic 3D mode. So when given any other mode as input, this function
+ * will return -EINVAL, an error that can be safely ignored.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ /*
+ * FIXME: sil-sii8620 doesn't have a connector around when
+ * we need one, so we have to be prepared for a NULL connector.
+ */
+ bool has_hdmi_infoframe = connector ?
+ connector->display_info.has_hdmi_infoframe : false;
+ int err;
+ u32 s3d_flags;
+ u8 vic;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ if (!has_hdmi_infoframe)
+ return -EINVAL;
+
+ vic = drm_match_hdmi_mode(mode);
+ s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ /*
+ * Even if it's not absolutely necessary to send the infoframe
+ * (i.e. vic == 0 and s3d_struct == 0) we will still send it if we
+ * know that the sink can handle it. This is based on a
+ * suggestion in HDMI 2.0 Appendix F. Apparently some sinks
+ * have trouble realizing that they should switch from 3D to 2D
+ * mode if the source simply stops sending the infoframe when
+ * it wants to switch from 3D to 2D.
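+ *
+ * As an illustration: 3840x2160@30 has an HDMI 1.4b VIC and is therefore
+ * signalled through this vendor infoframe, while an ordinary CEA mode
+ * such as 1920x1080@60 is signalled through the AVI infoframe VIC instead.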
+ */ + + if (vic && s3d_flags) + return -EINVAL; + + err = hdmi_vendor_infoframe_init(frame); + if (err < 0) + return err; + + frame->vic = vic; + frame->s3d_struct = s3d_structure_from_display_mode(mode); + + return 0; +} +EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode); + +static int drm_parse_tiled_block(struct drm_connector *connector, + struct displayid_block *block) +{ + struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block; + u16 w, h; + u8 tile_v_loc, tile_h_loc; + u8 num_v_tile, num_h_tile; + struct drm_tile_group *tg; + + w = tile->tile_size[0] | tile->tile_size[1] << 8; + h = tile->tile_size[2] | tile->tile_size[3] << 8; + + num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30); + num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30); + tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4); + tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4); + + connector->has_tile = true; + if (tile->tile_cap & 0x80) + connector->tile_is_single_monitor = true; + + connector->num_h_tile = num_h_tile + 1; + connector->num_v_tile = num_v_tile + 1; + connector->tile_h_loc = tile_h_loc; + connector->tile_v_loc = tile_v_loc; + connector->tile_h_size = w + 1; + connector->tile_v_size = h + 1; + + DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap); + DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1); + DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n", + num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc); + DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]); + + tg = drm_mode_get_tile_group(connector->dev, tile->topology_id); + if (!tg) { + tg = drm_mode_create_tile_group(connector->dev, tile->topology_id); + } + if (!tg) + return -ENOMEM; + + if (connector->tile_group != tg) { + /* if we haven't got a pointer, + take the reference, drop ref to old tile group */ + if (connector->tile_group) { + drm_mode_put_tile_group(connector->dev, connector->tile_group); + } + connector->tile_group = tg; + } else + /* if same tile group, then release the ref we just took. */ + drm_mode_put_tile_group(connector->dev, tg); + return 0; +} + +static int drm_parse_display_id(struct drm_connector *connector, + u8 *displayid, int length, + bool is_edid_extension) +{ + /* if this is an EDID extension the first byte will be 0x70 */ + int idx = 0; + struct displayid_block *block; + int ret; + + if (is_edid_extension) + idx = 1; + + ret = validate_displayid(displayid, length, idx); + if (ret) + return ret; + + idx += sizeof(struct displayid_hdr); + for_each_displayid_db(displayid, block, idx, length) { + DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n", + block->tag, block->rev, block->num_bytes); + + switch (block->tag) { + case DATA_BLOCK_TILED_DISPLAY: + ret = drm_parse_tiled_block(connector, block); + if (ret) + return ret; + break; + case DATA_BLOCK_TYPE_1_DETAILED_TIMING: + /* handled in mode gathering code. */ + break; + case DATA_BLOCK_CTA: + /* handled in the cea parser code. 
*/ + break; + default: + DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag); + break; + } + } + return 0; +} + +static void drm_get_displayid(struct drm_connector *connector, + struct edid *edid) +{ + void *displayid = NULL; + int ret; + connector->has_tile = false; + displayid = drm_find_displayid_extension(edid); + if (!displayid) { + /* drop reference to any tile group we had */ + goto out_drop_ref; + } + + ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true); + if (ret < 0) + goto out_drop_ref; + if (!connector->has_tile) + goto out_drop_ref; + return; +out_drop_ref: + if (connector->tile_group) { + drm_mode_put_tile_group(connector->dev, connector->tile_group); + connector->tile_group = NULL; + } + return; +} Index: sys/dev/drm/core/drm_encoder.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_encoder.c @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include + +#include +#include +#include + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * Encoders represent the connecting element between the CRTC (as the overall + * pixel pipeline, represented by &struct drm_crtc) and the connectors (as the + * generic sink entity, represented by &struct drm_connector). An encoder takes + * pixel data from a CRTC and converts it to a format suitable for any attached + * connector. Encoders are objects exposed to userspace, originally to allow + * userspace to infer cloning and connector/CRTC restrictions. Unfortunately + * almost all drivers get this wrong, making the uabi pretty much useless. On + * top of that the exposed restrictions are too simple for today's hardware, and + * the recommended way to infer restrictions is by using the + * DRM_MODE_ATOMIC_TEST_ONLY flag for the atomic IOCTL. + * + * Otherwise encoders aren't used in the uapi at all (any modeset request from + * userspace directly connects a connector with a CRTC), drivers are therefore + * free to use them however they wish. Modeset helper libraries make strong use + * of encoders to facilitate code sharing. But for more complex settings it is + * usually better to move shared code into a separate &drm_bridge. Compared to + * encoders, bridges also have the benefit of being purely an internal + * abstraction since they are not exposed to userspace at all. 
+ * + * Encoders are initialized with drm_encoder_init() and cleaned up using + * drm_encoder_cleanup(). + */ +static const struct drm_prop_enum_list drm_encoder_enum_list[] = { + { DRM_MODE_ENCODER_NONE, "None" }, + { DRM_MODE_ENCODER_DAC, "DAC" }, + { DRM_MODE_ENCODER_TMDS, "TMDS" }, + { DRM_MODE_ENCODER_LVDS, "LVDS" }, + { DRM_MODE_ENCODER_TVDAC, "TV" }, + { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, + { DRM_MODE_ENCODER_DSI, "DSI" }, + { DRM_MODE_ENCODER_DPMST, "DP MST" }, + { DRM_MODE_ENCODER_DPI, "DPI" }, +}; + +int drm_encoder_register_all(struct drm_device *dev) +{ + struct drm_encoder *encoder; + int ret = 0; + + drm_for_each_encoder(encoder, dev) { + if (encoder->funcs->late_register) + ret = encoder->funcs->late_register(encoder); + if (ret) + return ret; + } + + return 0; +} + +void drm_encoder_unregister_all(struct drm_device *dev) +{ + struct drm_encoder *encoder; + + drm_for_each_encoder(encoder, dev) { + if (encoder->funcs->early_unregister) + encoder->funcs->early_unregister(encoder); + } +} + +/** + * drm_encoder_init - Init a preallocated encoder + * @dev: drm device + * @encoder: the encoder to init + * @funcs: callbacks for this encoder + * @encoder_type: user visible type of the encoder + * @name: printf style format string for the encoder name, or NULL for default name + * + * Initialises a preallocated encoder. Encoder should be subclassed as part of + * driver encoder objects. At driver unload time drm_encoder_cleanup() should be + * called from the driver's &drm_encoder_funcs.destroy hook. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...) +{ + int ret; + + /* encoder index is used with 32bit bitmasks */ + if (WARN_ON(dev->mode_config.num_encoder >= 32)) + return -EINVAL; + + ret = drm_mode_object_add(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); + if (ret) + return ret; + + encoder->dev = dev; + encoder->encoder_type = encoder_type; + encoder->funcs = funcs; + if (name) { + va_list ap; + + va_start(ap, name); + encoder->name = kvasprintf(GFP_KERNEL, name, ap); + va_end(ap); + } else { + encoder->name = kasprintf(GFP_KERNEL, "%s-%d", + drm_encoder_enum_list[encoder_type].name, + encoder->base.id); + } + if (!encoder->name) { + ret = -ENOMEM; + goto out_put; + } + + list_add_tail(&encoder->head, &dev->mode_config.encoder_list); + encoder->index = dev->mode_config.num_encoder++; + +out_put: + if (ret) + drm_mode_object_unregister(dev, &encoder->base); + + return ret; +} +EXPORT_SYMBOL(drm_encoder_init); + +/** + * drm_encoder_cleanup - cleans up an initialised encoder + * @encoder: encoder to cleanup + * + * Cleans up the encoder but doesn't free the object. + */ +void drm_encoder_cleanup(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + + /* Note that the encoder_list is considered to be static; should we + * remove the drm_encoder at runtime we would have to decrement all + * the indices on the drm_encoder after us in the encoder_list. 
+ */ + + if (encoder->bridge) { + struct drm_bridge *bridge = encoder->bridge; + struct drm_bridge *next; + + while (bridge) { + next = bridge->next; + drm_bridge_detach(bridge); + bridge = next; + } + } + + drm_mode_object_unregister(dev, &encoder->base); + kfree(encoder->name); + list_del(&encoder->head); + dev->mode_config.num_encoder--; + + memset(encoder, 0, sizeof(*encoder)); +} +EXPORT_SYMBOL(drm_encoder_cleanup); + +static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) +{ + struct drm_connector *connector; + struct drm_device *dev = encoder->dev; + bool uses_atomic = false; + struct drm_connector_list_iter conn_iter; + + /* For atomic drivers only state objects are synchronously updated and + * protected by modeset locks, so check those first. */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (!connector->state) + continue; + + uses_atomic = true; + + if (connector->state->best_encoder != encoder) + continue; + + drm_connector_list_iter_end(&conn_iter); + return connector->state->crtc; + } + drm_connector_list_iter_end(&conn_iter); + + /* Don't return stale data (e.g. pending async disable). */ + if (uses_atomic) + return NULL; + + return encoder->crtc; +} + +int drm_mode_getencoder(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_get_encoder *enc_resp = data; + struct drm_encoder *encoder; + struct drm_crtc *crtc; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + encoder = drm_encoder_find(dev, file_priv, enc_resp->encoder_id); + if (!encoder) + return -ENOENT; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + crtc = drm_encoder_get_crtc(encoder); + if (crtc && drm_lease_held(file_priv, crtc->base.id)) + enc_resp->crtc_id = crtc->base.id; + else + enc_resp->crtc_id = 0; + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + enc_resp->encoder_type = encoder->encoder_type; + enc_resp->encoder_id = encoder->base.id; + enc_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv, + encoder->possible_crtcs); + enc_resp->possible_clones = encoder->possible_clones; + + return 0; +} Index: sys/dev/drm/core/drm_encoder_slave.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_encoder_slave.c @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include + +#include + +/** + * drm_i2c_encoder_init - Initialize an I2C slave encoder + * @dev: DRM device. + * @encoder: Encoder to be attached to the I2C device. You aren't + * required to have called drm_encoder_init() before. + * @adap: I2C adapter that will be used to communicate with + * the device. + * @info: Information that will be used to create the I2C device. + * Required fields are @addr and @type. + * + * Create an I2C device on the specified bus (the module containing its + * driver is transparently loaded) and attach it to the specified + * &drm_encoder_slave. The @slave_funcs field will be initialized with + * the hooks provided by the slave driver. + * + * If @info.platform_data is non-NULL it will be used as the initial + * slave config. + * + * Returns 0 on success or a negative errno on failure, in particular, + * -ENODEV is returned when no matching driver is found. + */ +int drm_i2c_encoder_init(struct drm_device *dev, + struct drm_encoder_slave *encoder, + struct i2c_adapter *adap, + const struct i2c_board_info *info) +{ + struct module *module = NULL; + struct i2c_client *client; + struct drm_i2c_encoder_driver *encoder_drv; + int err = 0; + + request_module("%s%s", I2C_MODULE_PREFIX, info->type); + + client = i2c_new_device(adap, info); + if (!client) { + err = -ENOMEM; + goto fail; + } + + if (!client->dev.driver) { + err = -ENODEV; + goto fail_unregister; + } + + module = client->dev.driver->owner; + if (!try_module_get(module)) { + err = -ENODEV; + goto fail_unregister; + } + + encoder->bus_priv = client; + + encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver)); + + err = encoder_drv->encoder_init(client, dev, encoder); + if (err) + goto fail_unregister; + + if (info->platform_data) + encoder->slave_funcs->set_config(&encoder->base, + info->platform_data); + + return 0; + +fail_unregister: + i2c_unregister_device(client); + module_put(module); +fail: + return err; +} +EXPORT_SYMBOL(drm_i2c_encoder_init); + +/** + * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder + * @drm_encoder: Encoder to be unregistered. + * + * This should be called from the @destroy method of an I2C slave + * encoder driver once I2C access is no longer needed. 
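+ *
+ * A typical (hypothetical) &drm_encoder_funcs.destroy hook for a slave
+ * encoder might look like:
+ *
+ *	static void foo_encoder_destroy(struct drm_encoder *encoder)
+ *	{
+ *		drm_i2c_encoder_destroy(encoder);
+ *		drm_encoder_cleanup(encoder);
+ *		kfree(to_encoder_slave(encoder));
+ *	}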
+ */ +void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder) +{ + struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder); + struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder); + struct module *module = client->dev.driver->owner; + + i2c_unregister_device(client); + encoder->bus_priv = NULL; + + module_put(module); +} +EXPORT_SYMBOL(drm_i2c_encoder_destroy); + +/* + * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: + */ + +static inline const struct drm_encoder_slave_funcs * +get_slave_funcs(struct drm_encoder *enc) +{ + return to_encoder_slave(enc)->slave_funcs; +} + +void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + get_slave_funcs(encoder)->dpms(encoder, mode); +} +EXPORT_SYMBOL(drm_i2c_encoder_dpms); + +bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + if (!get_slave_funcs(encoder)->mode_fixup) + return true; + + return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode); +} +EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup); + +void drm_i2c_encoder_prepare(struct drm_encoder *encoder) +{ + drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); +} +EXPORT_SYMBOL(drm_i2c_encoder_prepare); + +void drm_i2c_encoder_commit(struct drm_encoder *encoder) +{ + drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON); +} +EXPORT_SYMBOL(drm_i2c_encoder_commit); + +void drm_i2c_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode); +} +EXPORT_SYMBOL(drm_i2c_encoder_mode_set); + +enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + return get_slave_funcs(encoder)->detect(encoder, connector); +} +EXPORT_SYMBOL(drm_i2c_encoder_detect); + +void drm_i2c_encoder_save(struct drm_encoder *encoder) +{ + get_slave_funcs(encoder)->save(encoder); +} +EXPORT_SYMBOL(drm_i2c_encoder_save); + +void drm_i2c_encoder_restore(struct drm_encoder *encoder) +{ + get_slave_funcs(encoder)->restore(encoder); +} +EXPORT_SYMBOL(drm_i2c_encoder_restore); Index: sys/dev/drm/core/drm_fb_helper.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_fb_helper.c @@ -0,0 +1,3626 @@ +/* + * Copyright (c) 2006-2009 Red Hat Inc. + * Copyright (c) 2006-2008 Intel Corporation + * Copyright (c) 2007 Dave Airlie + * + * DRM framebuffer helper functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + * + * Authors: + * Dave Airlie + * Jesse Barnes + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "drm_crtc_helper_internal.h" +#include "drm_crtc_internal.h" +#include "drm_internal.h" + +static bool drm_fbdev_emulation = true; +module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600); +MODULE_PARM_DESC(fbdev_emulation, + "Enable legacy fbdev emulation [default=true]"); + +static int drm_fbdev_overalloc = CONFIG_DRM_FBDEV_OVERALLOC; +module_param(drm_fbdev_overalloc, int, 0444); +MODULE_PARM_DESC(drm_fbdev_overalloc, + "Overallocation of the fbdev buffer (%) [default=" + __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]"); + +/* + * In order to keep user-space compatibility, we want in certain use-cases + * to keep leaking the fbdev physical address to the user-space program + * handling the fbdev buffer. + * This is a bad habit essentially kept into closed source opengl driver + * that should really be moved into open-source upstream projects instead + * of using legacy physical addresses in user space to communicate with + * other out-of-tree kernel modules. + * + * This module_param *should* be removed as soon as possible and be + * considered as a broken and legacy behaviour from a modern fbdev device. + */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) +static bool drm_leak_fbdev_smem = false; +module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); +MODULE_PARM_DESC(drm_leak_fbdev_smem, + "Allow unsafe leaking fbdev physical smem address [default=false]"); +#endif + +static LIST_HEAD(kernel_fb_helper_list); +static DEFINE_MUTEX(kernel_fb_helper_lock); + +/** + * DOC: fbdev helpers + * + * The fb helper functions are useful to provide an fbdev on top of a drm kernel + * mode setting driver. They can be used mostly independently from the crtc + * helper functions used by many drivers to implement the kernel mode setting + * interfaces. + * + * Drivers that support a dumb buffer with a virtual address and mmap support, + * should try out the generic fbdev emulation using drm_fbdev_generic_setup(). + * + * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it + * down by calling drm_fb_helper_fbdev_teardown(). + * + * Drivers that need to handle connector hotplugging (e.g. dp mst) can't use + * the setup helper and will need to do the whole four-step setup process with + * drm_fb_helper_prepare(), drm_fb_helper_init(), + * drm_fb_helper_single_add_all_connectors(), enable hotplugging and + * drm_fb_helper_initial_config() to avoid a possible race window. + * + * At runtime drivers should restore the fbdev console by using + * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback. + * They should also notify the fb helper code from updates to the output + * configuration by using drm_fb_helper_output_poll_changed() as their + * &drm_mode_config_funcs.output_poll_changed callback. 
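+ *
+ * A bare-bones (hypothetical) sketch of the four-step setup process
+ * described above, assuming a driver-provided &foo_fb_helper_funcs and
+ * ignoring error handling:
+ *
+ *	drm_fb_helper_prepare(dev, helper, &foo_fb_helper_funcs);
+ *	ret = drm_fb_helper_init(dev, helper, max_conn_count);
+ *	ret = drm_fb_helper_single_add_all_connectors(helper);
+ *	... enable hotplug processing here ...
+ *	ret = drm_fb_helper_initial_config(helper, bpp_sel);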
+ *
+ * For suspend/resume consider using drm_mode_config_helper_suspend() and
+ * drm_mode_config_helper_resume() which take care of fbdev as well.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
+ *
+ * It is possible, though perhaps somewhat tricky, to implement race-free
+ * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
+ * helper must be called first to initialize the minimum required to make
+ * hotplug detection work. Drivers also need to make sure to properly set up
+ * the &drm_mode_config.funcs member. After calling drm_kms_helper_poll_init()
+ * it is safe to enable interrupts and start processing hotplug events. At the
+ * same time, drivers should initialize all modeset objects such as CRTCs,
+ * encoders and connectors. To finish up the fbdev helper initialization, the
+ * drm_fb_helper_init() function is called. To probe for all attached displays
+ * and set up an initial configuration using the detected hardware, drivers
+ * should call drm_fb_helper_single_add_all_connectors() followed by
+ * drm_fb_helper_initial_config().
+ *
+ * If &drm_framebuffer_funcs.dirty is set, the
+ * drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will
+ * accumulate changes and schedule &drm_fb_helper.dirty_work to run right
+ * away. This worker then calls the dirty() function ensuring that it will
+ * always run in process context since the fb_*() function could be running in
+ * atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
+ * callback it will also schedule dirty_work with the damage collected from the
+ * mmap page writes. Drivers can use drm_fb_helper_defio_init() to set up
+ * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
+ */
+
+#define drm_fb_helper_for_each_connector(fbh, i__) \
+ for (({ lockdep_assert_held(&(fbh)->lock); }), \
+ i__ = 0; i__ < (fbh)->connector_count; i__++)
+
+static int __drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_fb_helper_connector **temp;
+ unsigned int count;
+
+ if (!drm_fbdev_emulation)
+ return 0;
+
+ lockdep_assert_held(&fb_helper->lock);
+
+ count = fb_helper->connector_count + 1;
+
+ if (count > fb_helper->connector_info_alloc_count) {
+ size_t size = count * sizeof(fb_conn);
+
+ temp = krealloc(fb_helper->connector_info, size, GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ fb_helper->connector_info_alloc_count = count;
+ fb_helper->connector_info = temp;
+ }
+
+ fb_conn = kzalloc(sizeof(*fb_conn), GFP_KERNEL);
+ if (!fb_conn)
+ return -ENOMEM;
+
+ drm_connector_get(connector);
+ fb_conn->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_conn;
+
+ return 0;
+}
+
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ int err;
+
+ if (!fb_helper)
+ return 0;
+
+ mutex_lock(&fb_helper->lock);
+ err = __drm_fb_helper_add_one_connector(fb_helper, connector);
+ mutex_unlock(&fb_helper->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+
+/**
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init, can be NULL
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper.
This is a separate step to allow drivers to freely assign + * connectors to the fbdev, e.g. if some are reserved for special purposes or + * not adequate to be used for the fbcon. + * + * This function is protected against concurrent connector hotadds/removals + * using drm_fb_helper_add_one_connector() and + * drm_fb_helper_remove_one_connector(). + */ +int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) +{ + struct drm_device *dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + int i, ret = 0; + + if (!drm_fbdev_emulation || !fb_helper) + return 0; + + dev = fb_helper->dev; + + mutex_lock(&fb_helper->lock); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + ret = __drm_fb_helper_add_one_connector(fb_helper, connector); + if (ret) + goto fail; + } + goto out; + +fail: + drm_fb_helper_for_each_connector(fb_helper, i) { + struct drm_fb_helper_connector *fb_helper_connector = + fb_helper->connector_info[i]; + + drm_connector_put(fb_helper_connector->connector); + + kfree(fb_helper_connector); + fb_helper->connector_info[i] = NULL; + } + fb_helper->connector_count = 0; +out: + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&fb_helper->lock); + + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); + +static int __drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + struct drm_fb_helper_connector *fb_helper_connector; + int i, j; + + if (!drm_fbdev_emulation) + return 0; + + lockdep_assert_held(&fb_helper->lock); + + drm_fb_helper_for_each_connector(fb_helper, i) { + if (fb_helper->connector_info[i]->connector == connector) + break; + } + + if (i == fb_helper->connector_count) + return -EINVAL; + fb_helper_connector = fb_helper->connector_info[i]; + drm_connector_put(fb_helper_connector->connector); + + for (j = i + 1; j < fb_helper->connector_count; j++) + fb_helper->connector_info[j - 1] = fb_helper->connector_info[j]; + + fb_helper->connector_count--; + kfree(fb_helper_connector); + + return 0; +} + +int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + int err; + + if (!fb_helper) + return 0; + + mutex_lock(&fb_helper->lock); + err = __drm_fb_helper_remove_one_connector(fb_helper, connector); + mutex_unlock(&fb_helper->lock); + + return err; +} +EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); + +#ifdef __linux__ +static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) +{ + uint16_t *r_base, *g_base, *b_base; + + if (crtc->funcs->gamma_set == NULL) + return; + + r_base = crtc->gamma_store; + g_base = r_base + crtc->gamma_size; + b_base = g_base + crtc->gamma_size; + + crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, + crtc->gamma_size, NULL); +} + +/** + * drm_fb_helper_debug_enter - implementation for &fb_ops.fb_debug_enter + * @info: fbdev registered by the helper + */ +int drm_fb_helper_debug_enter(struct fb_info *info) +{ + struct drm_fb_helper *helper = info->par; + const struct drm_crtc_helper_funcs *funcs; + int i; + + list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { + for (i = 0; i < helper->crtc_count; i++) { + struct drm_mode_set *mode_set = + &helper->crtc_info[i].mode_set; + + if (!mode_set->crtc->enabled) + continue; + + funcs = mode_set->crtc->helper_private; + if (funcs->mode_set_base_atomic == NULL) + 
continue; + + if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev)) + continue; + + funcs->mode_set_base_atomic(mode_set->crtc, + mode_set->fb, + mode_set->x, + mode_set->y, + ENTER_ATOMIC_MODE_SET); + } + } + + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_debug_enter); + +/** + * drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave + * @info: fbdev registered by the helper + */ +int drm_fb_helper_debug_leave(struct fb_info *info) +{ + struct drm_fb_helper *helper = info->par; + struct drm_crtc *crtc; + const struct drm_crtc_helper_funcs *funcs; + struct drm_framebuffer *fb; + int i; + + for (i = 0; i < helper->crtc_count; i++) { + struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; + + crtc = mode_set->crtc; + if (drm_drv_uses_atomic_modeset(crtc->dev)) + continue; + + funcs = crtc->helper_private; + fb = crtc->primary->fb; + + if (!crtc->enabled) + continue; + + if (!fb) { + DRM_ERROR("no fb to restore??\n"); + continue; + } + + if (funcs->mode_set_base_atomic == NULL) + continue; + + drm_fb_helper_restore_lut_atomic(mode_set->crtc); + funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, + crtc->y, LEAVE_ATOMIC_MODE_SET); + } + + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_debug_leave); +#endif /* __linux__ */ + +/* Check if the plane can hw rotate to match panel orientation */ +static bool drm_fb_helper_panel_rotation(struct drm_mode_set *modeset, + unsigned int *rotation) +{ + struct drm_connector *connector = modeset->connectors[0]; + struct drm_plane *plane = modeset->crtc->primary; + u64 valid_mask = 0; + unsigned int i; + + if (!modeset->num_connectors) + return false; + + switch (connector->display_info.panel_orientation) { + case DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: + *rotation = DRM_MODE_ROTATE_180; + break; + case DRM_MODE_PANEL_ORIENTATION_LEFT_UP: + *rotation = DRM_MODE_ROTATE_90; + break; + case DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: + *rotation = DRM_MODE_ROTATE_270; + break; + default: + *rotation = DRM_MODE_ROTATE_0; + } + + /* + * TODO: support 90 / 270 degree hardware rotation, + * depending on the hardware this may require the framebuffer + * to be in a specific tiling format. 
+ */ + if (*rotation != DRM_MODE_ROTATE_180 || !plane->rotation_property) + return false; + + for (i = 0; i < plane->rotation_property->num_values; i++) + valid_mask |= (1ULL << plane->rotation_property->values[i]); + + if (!(*rotation & valid_mask)) + return false; + + return true; +} + +static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool active) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_plane_state *plane_state; + struct drm_plane *plane; + struct drm_atomic_state *state; + int i, ret; + struct drm_modeset_acquire_ctx ctx; + + drm_modeset_acquire_init(&ctx, 0); + + state = drm_atomic_state_alloc(dev); + if (!state) { + ret = -ENOMEM; + goto out_ctx; + } + + state->acquire_ctx = &ctx; +retry: + drm_for_each_plane(plane, dev) { + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto out_state; + } + + plane_state->rotation = DRM_MODE_ROTATE_0; + + /* disable non-primary: */ + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + continue; + + ret = __drm_atomic_helper_disable_plane(plane, plane_state); + if (ret != 0) + goto out_state; + } + + for (i = 0; i < fb_helper->crtc_count; i++) { + struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; + struct drm_plane *primary = mode_set->crtc->primary; + unsigned int rotation; + + if (drm_fb_helper_panel_rotation(mode_set, &rotation)) { + /* Cannot fail as we've already gotten the plane state above */ + plane_state = drm_atomic_get_new_plane_state(state, primary); + plane_state->rotation = rotation; + } + + ret = __drm_atomic_helper_set_config(mode_set, state); + if (ret != 0) + goto out_state; + + /* + * __drm_atomic_helper_set_config() sets active when a + * mode is set, unconditionally clear it if we force DPMS off + */ + if (!active) { + struct drm_crtc *crtc = mode_set->crtc; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + + crtc_state->active = false; + } + } + + ret = drm_atomic_commit(state); + +out_state: + if (ret == -EDEADLK) + goto backoff; + + drm_atomic_state_put(state); +out_ctx: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; + +backoff: + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + + goto retry; +} + +static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_plane *plane; + int i, ret = 0; + + drm_modeset_lock_all(fb_helper->dev); + drm_for_each_plane(plane, dev) { + if (plane->type != DRM_PLANE_TYPE_PRIMARY) + drm_plane_force_disable(plane); + + if (plane->rotation_property) + drm_mode_plane_set_obj_prop(plane, + plane->rotation_property, + DRM_MODE_ROTATE_0); + } + + for (i = 0; i < fb_helper->crtc_count; i++) { + struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; + struct drm_crtc *crtc = mode_set->crtc; + + if (crtc->funcs->cursor_set2) { + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + if (ret) + goto out; + } else if (crtc->funcs->cursor_set) { + ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); + if (ret) + goto out; + } + + ret = drm_mode_set_config_internal(mode_set); + if (ret) + goto out; + } +out: + drm_modeset_unlock_all(fb_helper->dev); + + return ret; +} + +static int restore_fbdev_mode_force(struct drm_fb_helper *fb_helper) +{ + struct drm_device *dev = fb_helper->dev; + + if (drm_drv_uses_atomic_modeset(dev)) + return restore_fbdev_mode_atomic(fb_helper, true); + else + return restore_fbdev_mode_legacy(fb_helper); 
+}
+
+static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	int ret;
+
+	if (!drm_master_internal_acquire(dev))
+		return -EBUSY;
+
+	ret = restore_fbdev_mode_force(fb_helper);
+
+	drm_master_internal_release(dev);
+
+	return ret;
+}
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This should be called from the driver's &drm_driver.lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
+ */
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+	bool do_delayed;
+	int ret;
+
+	if (!drm_fbdev_emulation || !fb_helper)
+		return -ENODEV;
+
+	if (READ_ONCE(fb_helper->deferred_setup))
+		return 0;
+
+	mutex_lock(&fb_helper->lock);
+	/*
+	 * TODO:
+	 * We should bail out here if there is a master by dropping _force.
+	 * Currently these igt tests fail if we do that:
+	 * - kms_fbcon_fbt@psr
+	 * - kms_fbcon_fbt@psr-suspend
+	 *
+	 * So first these tests need to be fixed so they drop master or don't
+	 * have an fd open.
+	 */
+	ret = restore_fbdev_mode_force(fb_helper);
+
+	do_delayed = fb_helper->delayed_hotplug;
+	if (do_delayed)
+		fb_helper->delayed_hotplug = false;
+	mutex_unlock(&fb_helper->lock);
+
+	if (do_delayed)
+		drm_fb_helper_hotplug_event(fb_helper);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
+
+#ifdef CONFIG_MAGIC_SYSRQ
+/*
+ * Restore the fbcon display for all kms drivers using this helper; used for
+ * sysrq and panic handling.
+ */
+static bool drm_fb_helper_force_kernel_mode(void)
+{
+	bool ret, error = false;
+	struct drm_fb_helper *helper;
+
+	if (list_empty(&kernel_fb_helper_list))
+		return false;
+
+	list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+		struct drm_device *dev = helper->dev;
+
+		if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+			continue;
+
+		mutex_lock(&helper->lock);
+		ret = restore_fbdev_mode_force(helper);
+		if (ret)
+			error = true;
+		mutex_unlock(&helper->lock);
+	}
+	return error;
+}
+
+static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
+{
+	bool ret;
+
+	ret = drm_fb_helper_force_kernel_mode();
+	if (ret == true)
+		DRM_ERROR("Failed to restore crtc configuration\n");
+}
+static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
+
+static void drm_fb_helper_sysrq(int dummy1)
+{
+	schedule_work(&drm_fb_helper_restore_work);
+}
+
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+	.handler = drm_fb_helper_sysrq,
+	.help_msg = "force-fb(V)",
+	.action_msg = "Restore framebuffer console",
+};
+#endif
+
+static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_connector *connector;
+	struct drm_mode_set *modeset;
+	int i, j;
+
+	drm_modeset_lock_all(dev);
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		modeset = &fb_helper->crtc_info[i].mode_set;
+
+		if (!modeset->crtc->enabled)
+			continue;
+
+		for (j = 0; j < modeset->num_connectors; j++) {
+			connector = modeset->connectors[j];
+			connector->funcs->dpms(connector, dpms_mode);
+			drm_object_property_set_value(&connector->base,
+				dev->mode_config.dpms_property, dpms_mode);
+		}
+	}
+	drm_modeset_unlock_all(dev);
+}
+
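+/*
+ * A minimal sketch of how a Linux driver is expected to wire the helpers in
+ * this file into its fbdev interface (the "foofb_ops" name is illustrative,
+ * not part of this patch):
+ *
+ *	static struct fb_ops foofb_ops = {
+ *		.owner		= THIS_MODULE,
+ *		.fb_check_var	= drm_fb_helper_check_var,
+ *		.fb_set_par	= drm_fb_helper_set_par,
+ *		.fb_blank	= drm_fb_helper_blank,
+ *		.fb_setcmap	= drm_fb_helper_setcmap,
+ *	};
+ */
+static void drm_fb_helper_dpms(struct fb_info *info,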
int dpms_mode) +{ +#ifdef __linux__ + struct drm_fb_helper *fb_helper = info->par; +#else + struct vt_kms_softc *sc = info->fb_priv; + struct drm_fb_helper *fb_helper = sc->fb_helper; +#endif + struct drm_device *dev = fb_helper->dev; + + /* + * For each CRTC in this fb, turn the connectors on/off. + */ + mutex_lock(&fb_helper->lock); + if (!drm_master_internal_acquire(dev)) + goto unlock; + + if (drm_drv_uses_atomic_modeset(dev)) + restore_fbdev_mode_atomic(fb_helper, dpms_mode == DRM_MODE_DPMS_ON); + else + dpms_legacy(fb_helper, dpms_mode); + +unlock: + mutex_unlock(&fb_helper->lock); +} + +/** + * drm_fb_helper_blank - implementation for &fb_ops.fb_blank + * @blank: desired blanking state + * @info: fbdev registered by the helper + */ +int drm_fb_helper_blank(int blank, struct fb_info *info) +{ + if (oops_in_progress) + return -EBUSY; + + switch (blank) { + /* Display: On; HSync: On, VSync: On */ + case FB_BLANK_UNBLANK: + drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON); + break; + /* Display: Off; HSync: On, VSync: On */ + case FB_BLANK_NORMAL: + drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); + break; + /* Display: Off; HSync: Off, VSync: On */ + case FB_BLANK_HSYNC_SUSPEND: + drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); + break; + /* Display: Off; HSync: On, VSync: Off */ + case FB_BLANK_VSYNC_SUSPEND: + drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND); + break; + /* Display: Off; HSync: Off, VSync: Off */ + case FB_BLANK_POWERDOWN: + drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF); + break; + } + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_blank); + +static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper, + struct drm_mode_set *modeset) +{ + int i; + + for (i = 0; i < modeset->num_connectors; i++) { + drm_connector_put(modeset->connectors[i]); + modeset->connectors[i] = NULL; + } + modeset->num_connectors = 0; + + drm_mode_destroy(helper->dev, modeset->mode); + modeset->mode = NULL; + + /* FIXME should hold a ref? 
*/
+	modeset->fb = NULL;
+}
+
+static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+{
+	int i;
+
+	for (i = 0; i < helper->connector_count; i++) {
+		drm_connector_put(helper->connector_info[i]->connector);
+		kfree(helper->connector_info[i]);
+	}
+	kfree(helper->connector_info);
+
+	for (i = 0; i < helper->crtc_count; i++) {
+		struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
+
+		drm_fb_helper_modeset_release(helper, modeset);
+		kfree(modeset->connectors);
+	}
+	kfree(helper->crtc_info);
+}
+
+#ifdef __linux__
+static void drm_fb_helper_resume_worker(struct work_struct *work)
+{
+	struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+						    resume_work);
+
+	console_lock();
+	fb_set_suspend(helper->fbdev, 0);
+	console_unlock();
+}
+
+static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
+					  struct drm_clip_rect *clip)
+{
+	struct drm_framebuffer *fb = fb_helper->fb;
+	unsigned int cpp = fb->format->cpp[0];
+	size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
+	void *src = fb_helper->fbdev->screen_buffer + offset;
+	void *dst = fb_helper->buffer->vaddr + offset;
+	size_t len = (clip->x2 - clip->x1) * cpp;
+	unsigned int y;
+
+	for (y = clip->y1; y < clip->y2; y++) {
+		memcpy(dst, src, len);
+		src += fb->pitches[0];
+		dst += fb->pitches[0];
+	}
+}
+
+static void drm_fb_helper_dirty_work(struct work_struct *work)
+{
+	struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+						    dirty_work);
+	struct drm_clip_rect *clip = &helper->dirty_clip;
+	struct drm_clip_rect clip_copy;
+	unsigned long flags;
+
+	spin_lock_irqsave(&helper->dirty_lock, flags);
+	clip_copy = *clip;
+	clip->x1 = clip->y1 = ~0;
+	clip->x2 = clip->y2 = 0;
+	spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+	/* call dirty callback only when it has been really touched */
+	if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+		/* Generic fbdev uses a shadow buffer */
+		if (helper->buffer)
+			drm_fb_helper_dirty_blit_real(helper, &clip_copy);
+		helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+	}
+}
+#endif /* __linux__ */
+
+/**
+ * drm_fb_helper_prepare - setup a drm_fb_helper structure
+ * @dev: DRM device
+ * @helper: driver-allocated fbdev helper structure to set up
+ * @funcs: pointer to structure of functions associated with this helper
+ *
+ * Sets up the bare minimum to make the framebuffer helper usable. This is
+ * useful to implement race-free initialization of the polling helpers.
+ */
+void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+			   const struct drm_fb_helper_funcs *funcs)
+{
+	INIT_LIST_HEAD(&helper->kernel_fb_list);
+#ifdef __linux__
+	spin_lock_init(&helper->dirty_lock);
+	INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
+	INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
+	helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
+#endif /* __linux__ */
+	mutex_init(&helper->lock);
+	helper->funcs = funcs;
+	helper->dev = dev;
+}
+EXPORT_SYMBOL(drm_fb_helper_prepare);
+
+/**
+ * drm_fb_helper_init - initialize a &struct drm_fb_helper
+ * @dev: drm device
+ * @fb_helper: driver-allocated fbdev helper structure to initialize
+ * @max_conn_count: max connector count
+ *
+ * This allocates the structures for the fbdev helper with the given limits.
+ * Note that this won't yet touch the hardware (through the driver interfaces)
+ * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
+ * to allow driver writers more control over the exact init sequence.
+ *
+ * Drivers must call drm_fb_helper_prepare() before calling this function.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+int drm_fb_helper_init(struct drm_device *dev,
+		       struct drm_fb_helper *fb_helper,
+		       int max_conn_count)
+{
+	struct drm_crtc *crtc;
+	struct drm_mode_config *config = &dev->mode_config;
+	int i;
+
+	if (!drm_fbdev_emulation) {
+		dev->fb_helper = fb_helper;
+		return 0;
+	}
+
+	if (!max_conn_count)
+		return -EINVAL;
+
+	fb_helper->crtc_info = kcalloc(config->num_crtc, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+	if (!fb_helper->crtc_info)
+		return -ENOMEM;
+
+	fb_helper->crtc_count = config->num_crtc;
+	fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+	if (!fb_helper->connector_info) {
+		kfree(fb_helper->crtc_info);
+		return -ENOMEM;
+	}
+	fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
+	fb_helper->connector_count = 0;
+
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		fb_helper->crtc_info[i].mode_set.connectors =
+			kcalloc(max_conn_count,
+				sizeof(struct drm_connector *),
+				GFP_KERNEL);
+
+		if (!fb_helper->crtc_info[i].mode_set.connectors)
+			goto out_free;
+		fb_helper->crtc_info[i].mode_set.num_connectors = 0;
+	}
+
+	i = 0;
+	drm_for_each_crtc(crtc, dev) {
+		fb_helper->crtc_info[i].mode_set.crtc = crtc;
+		i++;
+	}
+
+	dev->fb_helper = fb_helper;
+
+	return 0;
+out_free:
+	drm_fb_helper_crtc_free(fb_helper);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+/**
+ * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to alloc fb_info and the members cmap and apertures. Called
+ * by the driver within the fb_probe fb_helper callback function. Drivers do not
+ * need to release the allocated fb_info structure themselves, this is
+ * automatically done when calling drm_fb_helper_fini().
+ *
+ * RETURNS:
+ * fb_info pointer if things went okay, pointer containing error code
+ * otherwise
+ */
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+	struct device *dev = fb_helper->dev->dev;
+	struct fb_info *info;
+#ifdef __linux__
+	int ret;
+#endif
+
+	info = framebuffer_alloc(0, dev);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+#ifdef __linux__
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret)
+		goto err_release;
+
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_free_cmap;
+	}
+
+	fb_helper->fbdev = info;
+	info->skip_vt_switch = true;
+#elif defined(__FreeBSD__)
+	fb_helper->fbdev = info;
+#endif
+
+	return info;
+
+#ifdef __linux__
+err_free_cmap:
+	fb_dealloc_cmap(&info->cmap);
+err_release:
+	framebuffer_release(info);
+	return ERR_PTR(ret);
+#endif
+}
+EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
+
+/**
+ * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * A wrapper around unregister_framebuffer, to release the fb_info
+ * framebuffer device. This must be called before releasing all resources for
+ * @fb_helper by calling drm_fb_helper_fini().
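+ *
+ * A minimal teardown sketch (ordering only; error handling omitted):
+ *
+ *	drm_fb_helper_unregister_fbi(fb_helper);
+ *	drm_fb_helper_fini(fb_helper);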
+ */
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper && fb_helper->fbdev)
+		unregister_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
+
+/**
+ * drm_fb_helper_fini - finalize a &struct drm_fb_helper
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This cleans up all remaining resources associated with @fb_helper. Must be
+ * called after drm_fb_helper_unlink_fbi() was called.
+ */
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+	struct fb_info *info;
+
+	if (!fb_helper)
+		return;
+
+	fb_helper->dev->fb_helper = NULL;
+
+	if (!drm_fbdev_emulation)
+		return;
+
+#ifdef __linux__
+	cancel_work_sync(&fb_helper->resume_work);
+	cancel_work_sync(&fb_helper->dirty_work);
+#endif
+
+	info = fb_helper->fbdev;
+	if (info) {
+#ifdef __linux__
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+#endif
+		framebuffer_release(info);
+	}
+	fb_helper->fbdev = NULL;
+
+	mutex_lock(&kernel_fb_helper_lock);
+	if (!list_empty(&fb_helper->kernel_fb_list)) {
+		list_del(&fb_helper->kernel_fb_list);
+#ifdef __linux__
+		if (list_empty(&kernel_fb_helper_list))
+			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+#endif
+	}
+	mutex_unlock(&kernel_fb_helper_lock);
+
+	mutex_destroy(&fb_helper->lock);
+	drm_fb_helper_crtc_free(fb_helper);
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
+
+#ifdef __linux__
+/**
+ * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * A wrapper around unlink_framebuffer implemented by fbdev core
+ */
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper && fb_helper->fbdev)
+		unlink_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+
+static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
+				u32 width, u32 height)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct drm_clip_rect *clip = &helper->dirty_clip;
+	unsigned long flags;
+
+	if (!helper->fb->funcs->dirty)
+		return;
+
+	spin_lock_irqsave(&helper->dirty_lock, flags);
+	clip->x1 = min_t(u32, clip->x1, x);
+	clip->y1 = min_t(u32, clip->y1, y);
+	clip->x2 = max_t(u32, clip->x2, x + width);
+	clip->y2 = max_t(u32, clip->y2, y + height);
+	spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+	schedule_work(&helper->dirty_work);
+}
+
+/**
+ * drm_fb_helper_deferred_io() - fbdev deferred_io callback function
+ * @info: fb_info struct pointer
+ * @pagelist: list of dirty mmap framebuffer pages
+ *
+ * This function is used as the &fb_deferred_io.deferred_io
+ * callback function for flushing the fbdev mmap writes.
+ */
+void drm_fb_helper_deferred_io(struct fb_info *info,
+			       struct list_head *pagelist)
+{
+	unsigned long start, end, min, max;
+	struct page *page;
+	u32 y1, y2;
+
+	min = ULONG_MAX;
+	max = 0;
+	list_for_each_entry(page, pagelist, lru) {
+		start = page->index << PAGE_SHIFT;
+		end = start + PAGE_SIZE - 1;
+		min = min(min, start);
+		max = max(max, end);
+	}
+
+	if (min < max) {
+		y1 = min / info->fix.line_length;
+		y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
+			   info->var.yres);
+		drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1);
+	}
+}
+EXPORT_SYMBOL(drm_fb_helper_deferred_io);
+
+/**
+ * drm_fb_helper_defio_init - fbdev deferred I/O initialization
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * This function allocates &fb_deferred_io, sets the callback to
+ * drm_fb_helper_deferred_io() and the delay to 50ms, and calls
+ * fb_deferred_io_init().
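+ * The delay batches consecutive fbdev writes into a single dirty-FB flush.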
+ * It should be called from the &drm_fb_helper_funcs->fb_probe callback. + * drm_fb_helper_fbdev_teardown() cleans up deferred I/O. + * + * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done + * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby + * affect other instances of that &fb_ops. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) +{ + struct fb_info *info = fb_helper->fbdev; + struct fb_deferred_io *fbdefio; + struct fb_ops *fbops; + + fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL); + fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); + if (!fbdefio || !fbops) { + kfree(fbdefio); + kfree(fbops); + return -ENOMEM; + } + + info->fbdefio = fbdefio; + fbdefio->delay = msecs_to_jiffies(50); + fbdefio->deferred_io = drm_fb_helper_deferred_io; + + *fbops = *info->fbops; + info->fbops = fbops; + + fb_deferred_io_init(info); + + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_defio_init); + +/** + * drm_fb_helper_sys_read - wrapper around fb_sys_read + * @info: fb_info struct pointer + * @buf: userspace buffer to read from framebuffer memory + * @count: number of bytes to read from framebuffer memory + * @ppos: read offset within framebuffer memory + * + * A wrapper around fb_sys_read implemented by fbdev core + */ +ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos) +{ + return fb_sys_read(info, buf, count, ppos); +} +EXPORT_SYMBOL(drm_fb_helper_sys_read); + +/** + * drm_fb_helper_sys_write - wrapper around fb_sys_write + * @info: fb_info struct pointer + * @buf: userspace buffer to write to framebuffer memory + * @count: number of bytes to write to framebuffer memory + * @ppos: write offset within framebuffer memory + * + * A wrapper around fb_sys_write implemented by fbdev core + */ +ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + ssize_t ret; + + ret = fb_sys_write(info, buf, count, ppos); + if (ret > 0) + drm_fb_helper_dirty(info, 0, 0, info->var.xres, + info->var.yres); + + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_sys_write); + +/** + * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect + * @info: fbdev registered by the helper + * @rect: info about rectangle to fill + * + * A wrapper around sys_fillrect implemented by fbdev core + */ +void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + sys_fillrect(info, rect); + drm_fb_helper_dirty(info, rect->dx, rect->dy, + rect->width, rect->height); +} +EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); + +/** + * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea + * @info: fbdev registered by the helper + * @area: info about area to copy + * + * A wrapper around sys_copyarea implemented by fbdev core + */ +void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + sys_copyarea(info, area); + drm_fb_helper_dirty(info, area->dx, area->dy, + area->width, area->height); +} +EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); + +/** + * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit + * @info: fbdev registered by the helper + * @image: info about image to blit + * + * A wrapper around sys_imageblit implemented by fbdev core + */ +void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + sys_imageblit(info, image); + drm_fb_helper_dirty(info, image->dx, image->dy, + image->width, image->height); 
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
+
+/**
+ * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around cfb_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect)
+{
+	cfb_fillrect(info, rect);
+	drm_fb_helper_dirty(info, rect->dx, rect->dy,
+			    rect->width, rect->height);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
+
+/**
+ * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around cfb_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area)
+{
+	cfb_copyarea(info, area);
+	drm_fb_helper_dirty(info, area->dx, area->dy,
+			    area->width, area->height);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
+
+/**
+ * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around cfb_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+				 const struct fb_image *image)
+{
+	cfb_imageblit(info, image);
+	drm_fb_helper_dirty(info, image->dx, image->dy,
+			    image->width, image->height);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
+#endif /* __linux__ */
+
+/**
+ * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ * @suspend: whether to suspend or resume
+ *
+ * A wrapper around fb_set_suspend implemented by fbdev core.
+ * Use drm_fb_helper_set_suspend_unlocked() if you don't need to take
+ * the lock yourself
+ */
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
+{
+#ifdef __linux__
+	if (fb_helper && fb_helper->fbdev)
+		fb_set_suspend(fb_helper->fbdev, suspend);
+#endif
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend);
+
+/**
+ * drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also
+ *                                      takes the console lock
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ * @suspend: whether to suspend or resume
+ *
+ * A wrapper around fb_set_suspend() that takes the console lock. If the lock
+ * isn't available on resume, a worker is tasked with waiting for the lock
+ * to become available. The console lock can be pretty contended on resume
+ * due to all the printk activity.
+ *
+ * This function can be called multiple times with the same state since
+ * &fb_info.state is checked to see if fbdev is running or not before locking.
+ *
+ * Use drm_fb_helper_set_suspend() if you need to take the lock yourself.
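+ *
+ * A minimal suspend/resume sketch (driver callback names are illustrative):
+ *
+ *	foo_suspend():	drm_fb_helper_set_suspend_unlocked(fb_helper, true);
+ *	foo_resume():	drm_fb_helper_set_suspend_unlocked(fb_helper, false);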
+ */
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+					bool suspend)
+{
+#ifdef __linux__
+	if (!fb_helper || !fb_helper->fbdev)
+		return;
+
+	/* make sure there's no pending/ongoing resume */
+	flush_work(&fb_helper->resume_work);
+
+	if (suspend) {
+		if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING)
+			return;
+
+		console_lock();
+	} else {
+		if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING)
+			return;
+
+		if (!console_trylock()) {
+			schedule_work(&fb_helper->resume_work);
+			return;
+		}
+	}
+
+	fb_set_suspend(fb_helper->fbdev, suspend);
+	console_unlock();
+#endif
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
+
+#ifdef __linux__
+static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info)
+{
+	u32 *palette = (u32 *)info->pseudo_palette;
+	int i;
+
+	if (cmap->start + cmap->len > 16)
+		return -EINVAL;
+
+	for (i = 0; i < cmap->len; ++i) {
+		u16 red = cmap->red[i];
+		u16 green = cmap->green[i];
+		u16 blue = cmap->blue[i];
+		u32 value;
+
+		red >>= 16 - info->var.red.length;
+		green >>= 16 - info->var.green.length;
+		blue >>= 16 - info->var.blue.length;
+		value = (red << info->var.red.offset) |
+			(green << info->var.green.offset) |
+			(blue << info->var.blue.offset);
+		if (info->var.transp.length > 0) {
+			u32 mask = (1 << info->var.transp.length) - 1;
+
+			mask <<= info->var.transp.offset;
+			value |= mask;
+		}
+		palette[cmap->start + i] = value;
+	}
+
+	return 0;
+}
+
+static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_crtc *crtc;
+	u16 *r, *g, *b;
+	int i, ret = 0;
+
+	drm_modeset_lock_all(fb_helper->dev);
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+		if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (cmap->start + cmap->len > crtc->gamma_size) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		r = crtc->gamma_store;
+		g = r + crtc->gamma_size;
+		b = g + crtc->gamma_size;
+
+		memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r));
+		memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g));
+		memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b));
+
+		ret = crtc->funcs->gamma_set(crtc, r, g, b,
+					     crtc->gamma_size, NULL);
+		if (ret)
+			goto out;
+	}
+out:
+	drm_modeset_unlock_all(fb_helper->dev);
+
+	return ret;
+}
+
+static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc,
+						       struct fb_cmap *cmap)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_property_blob *gamma_lut;
+	struct drm_color_lut *lut;
+	int size = crtc->gamma_size;
+	int i;
+
+	if (!size || cmap->start + cmap->len > size)
+		return ERR_PTR(-EINVAL);
+
+	gamma_lut = drm_property_create_blob(dev, sizeof(*lut) * size, NULL);
+	if (IS_ERR(gamma_lut))
+		return gamma_lut;
+
+	lut = gamma_lut->data;
+	if (cmap->start || cmap->len != size) {
+		u16 *r = crtc->gamma_store;
+		u16 *g = r + crtc->gamma_size;
+		u16 *b = g + crtc->gamma_size;
+
+		for (i = 0; i < cmap->start; i++) {
+			lut[i].red = r[i];
+			lut[i].green = g[i];
+			lut[i].blue = b[i];
+		}
+		for (i = cmap->start + cmap->len; i < size; i++) {
+			lut[i].red = r[i];
+			lut[i].green = g[i];
+			lut[i].blue = b[i];
+		}
+	}
+
+	for (i = 0; i < cmap->len; i++) {
+		lut[cmap->start + i].red = cmap->red[i];
+		lut[cmap->start + i].green = cmap->green[i];
+		lut[cmap->start + i].blue = cmap->blue[i];
+	}
+
+	return gamma_lut;
+}
+
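+/*
+ * Atomic path: rather than calling crtc->funcs->gamma_set directly, build a
+ * GAMMA_LUT blob from the cmap and commit it through the atomic API, so the
+ * legacy gamma ramp and the CRTC's color-management state stay consistent.
+ */
+static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_device *dev =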
fb_helper->dev; + struct drm_property_blob *gamma_lut = NULL; + struct drm_modeset_acquire_ctx ctx; + struct drm_crtc_state *crtc_state; + struct drm_atomic_state *state; + struct drm_crtc *crtc; + u16 *r, *g, *b; + int i, ret = 0; + bool replaced; + + drm_modeset_acquire_init(&ctx, 0); + + state = drm_atomic_state_alloc(dev); + if (!state) { + ret = -ENOMEM; + goto out_ctx; + } + + state->acquire_ctx = &ctx; +retry: + for (i = 0; i < fb_helper->crtc_count; i++) { + crtc = fb_helper->crtc_info[i].mode_set.crtc; + + if (!gamma_lut) + gamma_lut = setcmap_new_gamma_lut(crtc, cmap); + if (IS_ERR(gamma_lut)) { + ret = PTR_ERR(gamma_lut); + gamma_lut = NULL; + goto out_state; + } + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto out_state; + } + + replaced = drm_property_replace_blob(&crtc_state->degamma_lut, + NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, + gamma_lut); + crtc_state->color_mgmt_changed |= replaced; + } + + ret = drm_atomic_commit(state); + if (ret) + goto out_state; + + for (i = 0; i < fb_helper->crtc_count; i++) { + crtc = fb_helper->crtc_info[i].mode_set.crtc; + + r = crtc->gamma_store; + g = r + crtc->gamma_size; + b = g + crtc->gamma_size; + + memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); + memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); + memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); + } + +out_state: + if (ret == -EDEADLK) + goto backoff; + + drm_property_blob_put(gamma_lut); + drm_atomic_state_put(state); +out_ctx: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; + +backoff: + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; +} + +/** + * drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap + * @cmap: cmap to set + * @info: fbdev registered by the helper + */ +int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + int ret; + + if (oops_in_progress) + return -EBUSY; + + mutex_lock(&fb_helper->lock); + + if (!drm_master_internal_acquire(dev)) { + ret = -EBUSY; + goto unlock; + } + + if (info->fix.visual == FB_VISUAL_TRUECOLOR) + ret = setcmap_pseudo_palette(cmap, info); + else if (drm_drv_uses_atomic_modeset(fb_helper->dev)) + ret = setcmap_atomic(cmap, info); + else + ret = setcmap_legacy(cmap, info); + + drm_master_internal_release(dev); +unlock: + mutex_unlock(&fb_helper->lock); + + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_setcmap); + +/** + * drm_fb_helper_ioctl - legacy ioctl implementation + * @info: fbdev registered by the helper + * @cmd: ioctl command + * @arg: ioctl argument + * + * A helper to implement the standard fbdev ioctl. Only + * FBIO_WAITFORVSYNC is implemented for now. + */ +int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct drm_mode_set *mode_set; + struct drm_crtc *crtc; + int ret = 0; + + mutex_lock(&fb_helper->lock); + if (!drm_master_internal_acquire(dev)) { + ret = -EBUSY; + goto unlock; + } + + switch (cmd) { + case FBIO_WAITFORVSYNC: + /* + * Only consider the first CRTC. 
+		 *
+		 * This ioctl is supposed to take the CRTC number as
+		 * an argument, but in fbdev times, what that number
+		 * was supposed to be was quite unclear, different
+		 * drivers were passing that argument differently
+		 * (some by reference, some by value), and most of the
+		 * userspace applications were just hardcoding 0 as an
+		 * argument.
+		 *
+		 * The first CRTC should be the integrated panel on
+		 * most drivers, so this is the best choice we can
+		 * make. If we're not smart enough here, one should
+		 * just consider switching the userspace to KMS.
+		 */
+		mode_set = &fb_helper->crtc_info[0].mode_set;
+		crtc = mode_set->crtc;
+
+		/*
+		 * Only wait for a vblank event if the CRTC is
+		 * enabled, otherwise just don't do anything,
+		 * not even report an error.
+		 */
+		ret = drm_crtc_vblank_get(crtc);
+		if (!ret) {
+			drm_crtc_wait_one_vblank(crtc);
+			drm_crtc_vblank_put(crtc);
+		}
+
+		ret = 0;
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	drm_master_internal_release(dev);
+unlock:
+	mutex_unlock(&fb_helper->lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_ioctl);
+
+static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
+				      const struct fb_var_screeninfo *var_2)
+{
+	return var_1->bits_per_pixel == var_2->bits_per_pixel &&
+	       var_1->grayscale == var_2->grayscale &&
+	       var_1->red.offset == var_2->red.offset &&
+	       var_1->red.length == var_2->red.length &&
+	       var_1->red.msb_right == var_2->red.msb_right &&
+	       var_1->green.offset == var_2->green.offset &&
+	       var_1->green.length == var_2->green.length &&
+	       var_1->green.msb_right == var_2->green.msb_right &&
+	       var_1->blue.offset == var_2->blue.offset &&
+	       var_1->blue.length == var_2->blue.length &&
+	       var_1->blue.msb_right == var_2->blue.msb_right &&
+	       var_1->transp.offset == var_2->transp.offset &&
+	       var_1->transp.length == var_2->transp.length &&
+	       var_1->transp.msb_right == var_2->transp.msb_right;
+}
+
+static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
+					 u8 depth)
+{
+	switch (depth) {
+	case 8:
+		var->red.offset = 0;
+		var->green.offset = 0;
+		var->blue.offset = 0;
+		var->red.length = 8; /* 8bit DAC */
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+	case 15:
+		var->red.offset = 10;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 5;
+		var->blue.length = 5;
+		var->transp.offset = 15;
+		var->transp.length = 1;
+		break;
+	case 16:
+		var->red.offset = 11;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 6;
+		var->blue.length = 5;
+		var->transp.offset = 0;
+		break;
+	case 24:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+	case 32:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
+ * @var: screeninfo to check
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+			    struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+
+	if (in_dbg_master())
+		return -EINVAL;
+
+	if (var->pixclock != 0) {
+		DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+		var->pixclock = 0;
+	}
+
+	if ((drm_format_info_block_width(fb->format, 0) > 1) ||
+	    (drm_format_info_block_height(fb->format, 0) > 1))
+		return -EINVAL;
+
+	/*
+	 * Changes to struct fb_var_screeninfo are currently not pushed back
+	 * to KMS, hence fail if different settings are requested.
+	 */
+	if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+	    var->xres > fb->width || var->yres > fb->height ||
+	    var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+		DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
+			  "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+			  var->xres, var->yres, var->bits_per_pixel,
+			  var->xres_virtual, var->yres_virtual,
+			  fb->width, fb->height, fb->format->cpp[0] * 8);
+		return -EINVAL;
+	}
+
+	/*
+	 * Workaround for SDL 1.2, which is known to be setting all pixel format
+	 * fields to zero in some cases. We treat this situation as a
+	 * kind of "use some reasonable autodetected values".
+	 */
+	if (!var->red.offset && !var->green.offset &&
+	    !var->blue.offset && !var->transp.offset &&
+	    !var->red.length && !var->green.length &&
+	    !var->blue.length && !var->transp.length &&
+	    !var->red.msb_right && !var->green.msb_right &&
+	    !var->blue.msb_right && !var->transp.msb_right) {
+		drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
+	}
+
+	/*
+	 * drm fbdev emulation doesn't support changing the pixel format at all,
+	 * so reject all pixel format changing requests.
+	 */
+	if (!drm_fb_pixel_format_equal(var, &info->var)) {
+		DRM_DEBUG("fbdev emulation doesn't support changing the pixel format\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_check_var);
+#endif /* __linux__ */
+
+/**
+ * drm_fb_helper_set_par - implementation for &fb_ops.fb_set_par
+ * @info: fbdev registered by the helper
+ *
+ * This will let fbcon do the mode init and is called at initialization time by
+ * the fbdev core when registering the driver, and later on through the hotplug
+ * callback.
+ */ +int drm_fb_helper_set_par(struct fb_info *info) +{ +#ifdef __linux__ + struct drm_fb_helper *fb_helper = info->par; + struct fb_var_screeninfo *var = &info->var; +#else + struct vt_kms_softc *sc = info->fb_priv; + struct drm_fb_helper *fb_helper = sc->fb_helper; + +#endif + + if (oops_in_progress) + return -EBUSY; + +#ifdef __linux__ + if (var->pixclock != 0) { + DRM_ERROR("PIXEL CLOCK SET\n"); + return -EINVAL; + } +#endif + + drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_set_par); + +#ifdef __linux__ +static void pan_set(struct drm_fb_helper *fb_helper, int x, int y) +{ + int i; + + for (i = 0; i < fb_helper->crtc_count; i++) { + struct drm_mode_set *mode_set; + + mode_set = &fb_helper->crtc_info[i].mode_set; + + mode_set->x = x; + mode_set->y = y; + } +} + +static int pan_display_atomic(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + int ret; + + pan_set(fb_helper, var->xoffset, var->yoffset); + + ret = restore_fbdev_mode_atomic(fb_helper, true); + if (!ret) { + info->var.xoffset = var->xoffset; + info->var.yoffset = var->yoffset; + } else + pan_set(fb_helper, info->var.xoffset, info->var.yoffset); + + return ret; +} + +static int pan_display_legacy(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_mode_set *modeset; + int ret = 0; + int i; + + drm_modeset_lock_all(fb_helper->dev); + for (i = 0; i < fb_helper->crtc_count; i++) { + modeset = &fb_helper->crtc_info[i].mode_set; + + modeset->x = var->xoffset; + modeset->y = var->yoffset; + + if (modeset->num_connectors) { + ret = drm_mode_set_config_internal(modeset); + if (!ret) { + info->var.xoffset = var->xoffset; + info->var.yoffset = var->yoffset; + } + } + } + drm_modeset_unlock_all(fb_helper->dev); + + return ret; +} + +/** + * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display + * @var: updated screen information + * @info: fbdev registered by the helper + */ +int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + int ret; + + if (oops_in_progress) + return -EBUSY; + + mutex_lock(&fb_helper->lock); + if (!drm_master_internal_acquire(dev)) { + ret = -EBUSY; + goto unlock; + } + + if (drm_drv_uses_atomic_modeset(dev)) + ret = pan_display_atomic(var, info); + else + ret = pan_display_legacy(var, info); + + drm_master_internal_release(dev); +unlock: + mutex_unlock(&fb_helper->lock); + + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_pan_display); +#endif /* __linux__ */ + +/* + * Allocates the backing storage and sets up the fbdev info structure through + * the ->fb_probe callback. 
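+ * The scanout surface is sized to cover every enabled CRTC viewport.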
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+					 int preferred_bpp)
+{
+	int ret = 0;
+	int crtc_count = 0;
+	int i;
+	struct drm_fb_helper_surface_size sizes;
+	int best_depth = 0;
+
+	memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+	sizes.surface_depth = 24;
+	sizes.surface_bpp = 32;
+	sizes.fb_width = (u32)-1;
+	sizes.fb_height = (u32)-1;
+
+	/*
+	 * If driver picks 8 or 16 by default use that for both depth/bpp
+	 * to begin with
+	 */
+	if (preferred_bpp != sizes.surface_bpp)
+		sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+
+	drm_fb_helper_for_each_connector(fb_helper, i) {
+		struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+		struct drm_cmdline_mode *cmdline_mode;
+
+		cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
+
+		if (cmdline_mode->bpp_specified) {
+			switch (cmdline_mode->bpp) {
+			case 8:
+				sizes.surface_depth = sizes.surface_bpp = 8;
+				break;
+			case 15:
+				sizes.surface_depth = 15;
+				sizes.surface_bpp = 16;
+				break;
+			case 16:
+				sizes.surface_depth = sizes.surface_bpp = 16;
+				break;
+			case 24:
+				sizes.surface_depth = sizes.surface_bpp = 24;
+				break;
+			case 32:
+				sizes.surface_depth = 24;
+				sizes.surface_bpp = 32;
+				break;
+			}
+			break;
+		}
+	}
+
+	/*
+	 * If we run into a situation where, for example, the primary plane
+	 * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth
+	 * 16) we need to scale down the depth of the sizes we request.
+	 */
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+		struct drm_crtc *crtc = mode_set->crtc;
+		struct drm_plane *plane = crtc->primary;
+		int j;
+
+		DRM_DEBUG("test CRTC %d primary plane\n", i);
+
+		for (j = 0; j < plane->format_count; j++) {
+			const struct drm_format_info *fmt;
+
+			fmt = drm_format_info(plane->format_types[j]);
+
+			/*
+			 * Do not consider YUV or other complicated formats
+			 * for framebuffers. This means only legacy formats
+			 * are supported (fmt->depth is a legacy field) but
+			 * the framebuffer emulation can only deal with such
+			 * formats, specifically RGB/BGR formats.
+			 */
+			if (fmt->depth == 0)
+				continue;
+
+			/* We found a perfect fit, great */
+			if (fmt->depth == sizes.surface_depth) {
+				best_depth = fmt->depth;
+				break;
+			}
+
+			/* Skip depths above what we're looking for */
+			if (fmt->depth > sizes.surface_depth)
+				continue;
+
+			/* Best depth found so far */
+			if (fmt->depth > best_depth)
+				best_depth = fmt->depth;
+		}
+	}
+	if (sizes.surface_depth != best_depth && best_depth) {
+		DRM_INFO("requested bpp %d, scaled depth down to %d",
+			 sizes.surface_bpp, best_depth);
+		sizes.surface_depth = best_depth;
+	}
+
+	/* first up get a count of crtcs now in use and new min/maxes width/heights */
+	crtc_count = 0;
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		struct drm_display_mode *desired_mode;
+		struct drm_mode_set *mode_set;
+		int x, y, j;
+		/* in case of tile group, are we the last tile vert or horiz?
+ * If no tile group you are always the last one both vertically + * and horizontally + */ + bool lastv = true, lasth = true; + + mode_set = &fb_helper->crtc_info[i].mode_set; + desired_mode = mode_set->mode; + + if (!desired_mode) + continue; + + crtc_count++; + + x = mode_set->x; + y = mode_set->y; + + sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width); + sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height); + + for (j = 0; j < mode_set->num_connectors; j++) { + struct drm_connector *connector = mode_set->connectors[j]; + + if (connector->has_tile) { + lasth = (connector->tile_h_loc == (connector->num_h_tile - 1)); + lastv = (connector->tile_v_loc == (connector->num_v_tile - 1)); + /* cloning to multiple tiles is just crazy-talk, so: */ + break; + } + } + + if (lasth) + sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width); + if (lastv) + sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height); + } + + if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { + DRM_INFO("Cannot find any crtc or sizes\n"); + + /* First time: disable all crtc's.. */ + if (!fb_helper->deferred_setup) + restore_fbdev_mode(fb_helper); + return -EAGAIN; + } + + /* Handle our overallocation */ + sizes.surface_height *= drm_fbdev_overalloc; + sizes.surface_height /= 100; + + /* push down into drivers */ + ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); + if (ret < 0) + return ret; + + strcpy(fb_helper->fb->comm, "[fbcon]"); + return 0; +} + +static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth) +{ +#ifdef __linux__ + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR : + FB_VISUAL_TRUECOLOR; + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; + info->fix.type_aux = 0; + info->fix.xpanstep = 1; /* doing it in hw */ + info->fix.ypanstep = 1; /* doing it in hw */ + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_NONE; + + info->fix.line_length = pitch; +#else + info->fb_stride = pitch; +#endif +} + +static void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, + uint32_t fb_width, uint32_t fb_height) +{ + struct drm_framebuffer *fb = fb_helper->fb; +#ifdef __FreeBSD__ + struct vt_kms_softc *sc; +#endif + +#ifdef __linux__ + WARN_ON((drm_format_info_block_width(fb->format, 0) > 1) || + (drm_format_info_block_height(fb->format, 0) > 1)); + info->pseudo_palette = fb_helper->pseudo_palette; + info->var.xres_virtual = fb->width; + info->var.yres_virtual = fb->height; + info->var.bits_per_pixel = fb->format->cpp[0] * 8; + info->var.accel_flags = FB_ACCELF_TEXT; + info->var.xoffset = 0; + info->var.yoffset = 0; + info->var.activate = FB_ACTIVATE_NOW; + + drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth); + + info->var.xres = fb_width; + info->var.yres = fb_height; +#elif defined(__FreeBSD__) + info->fb_name = device_get_nameunit(fb_helper->dev->dev); + info->fb_width = fb->width; + info->fb_height = fb->height; + info->fb_depth = fb->format->cpp[0] * 8; + + sc = (struct vt_kms_softc *)info->fb_priv; + sc->fb_helper = fb_helper; +#endif +} + +/** + * drm_fb_helper_fill_info - initializes fbdev information + * @info: fbdev instance to set up + * @fb_helper: fb helper instance to use as template + * @sizes: describes fbdev size and scanout surface size + * + * Sets up the variable and fixed fbdev metainformation from the given fb helper + * instance and the drm framebuffer 
allocated in &drm_fb_helper.fb. + * + * Drivers should call this (or their equivalent setup code) from their + * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev + * backing storage framebuffer. + */ +void drm_fb_helper_fill_info(struct fb_info *info, + struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_framebuffer *fb = fb_helper->fb; + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(info, fb_helper, + sizes->fb_width, sizes->fb_height); + +#ifdef __linux__ + info->par = fb_helper; + snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb", + fb_helper->dev->driver->name); +#endif + +} +EXPORT_SYMBOL(drm_fb_helper_fill_info); + +static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper, + uint32_t maxX, + uint32_t maxY) +{ + struct drm_connector *connector; + int i, count = 0; + + drm_fb_helper_for_each_connector(fb_helper, i) { + connector = fb_helper->connector_info[i]->connector; + count += connector->funcs->fill_modes(connector, maxX, maxY); + } + + return count; +} + +struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) +{ + struct drm_display_mode *mode; + + list_for_each_entry(mode, &fb_connector->connector->modes, head) { + if (mode->hdisplay > width || + mode->vdisplay > height) + continue; + if (mode->type & DRM_MODE_TYPE_PREFERRED) + return mode; + } + return NULL; +} +EXPORT_SYMBOL(drm_has_preferred_mode); + +static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) +{ + return fb_connector->connector->cmdline_mode.specified; +} + +struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn) +{ + struct drm_cmdline_mode *cmdline_mode; + struct drm_display_mode *mode; + bool prefer_non_interlace; + + cmdline_mode = &fb_helper_conn->connector->cmdline_mode; + if (cmdline_mode->specified == false) + return NULL; + + /* attempt to find a matching mode in the list of modes + * we have gotten so far, if not add a CVT mode that conforms + */ + if (cmdline_mode->rb || cmdline_mode->margins) + goto create_mode; + + prefer_non_interlace = !cmdline_mode->interlace; +again: + list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { + /* check width/height */ + if (mode->hdisplay != cmdline_mode->xres || + mode->vdisplay != cmdline_mode->yres) + continue; + + if (cmdline_mode->refresh_specified) { + if (mode->vrefresh != cmdline_mode->refresh) + continue; + } + + if (cmdline_mode->interlace) { + if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) + continue; + } else if (prefer_non_interlace) { + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + continue; + } + return mode; + } + + if (prefer_non_interlace) { + prefer_non_interlace = false; + goto again; + } + +create_mode: + mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev, + cmdline_mode); + list_add(&mode->head, &fb_helper_conn->connector->modes); + return mode; +} +EXPORT_SYMBOL(drm_pick_cmdline_mode); + +static bool drm_connector_enabled(struct drm_connector *connector, bool strict) +{ + bool enable; + + if (connector->display_info.non_desktop) + return false; + + if (strict) + enable = connector->status == connector_status_connected; + else + enable = connector->status != connector_status_disconnected; + + return enable; +} + +static void drm_enable_connectors(struct drm_fb_helper *fb_helper, + bool *enabled) +{ + bool any_enabled = false; + struct drm_connector 
*connector; + int i = 0; + + drm_fb_helper_for_each_connector(fb_helper, i) { + connector = fb_helper->connector_info[i]->connector; + enabled[i] = drm_connector_enabled(connector, true); + DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, + connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no"); + + any_enabled |= enabled[i]; + } + + if (any_enabled) + return; + + drm_fb_helper_for_each_connector(fb_helper, i) { + connector = fb_helper->connector_info[i]->connector; + enabled[i] = drm_connector_enabled(connector, false); + } +} + +static bool drm_target_cloned(struct drm_fb_helper *fb_helper, + struct drm_display_mode **modes, + struct drm_fb_offset *offsets, + bool *enabled, int width, int height) +{ + int count, i, j; + bool can_clone = false; + struct drm_fb_helper_connector *fb_helper_conn; + struct drm_display_mode *dmt_mode, *mode; + + /* only contemplate cloning in the single crtc case */ + if (fb_helper->crtc_count > 1) + return false; + + count = 0; + drm_fb_helper_for_each_connector(fb_helper, i) { + if (enabled[i]) + count++; + } + + /* only contemplate cloning if more than one connector is enabled */ + if (count <= 1) + return false; + + /* check the command line or if nothing common pick 1024x768 */ + can_clone = true; + drm_fb_helper_for_each_connector(fb_helper, i) { + if (!enabled[i]) + continue; + fb_helper_conn = fb_helper->connector_info[i]; + modes[i] = drm_pick_cmdline_mode(fb_helper_conn); + if (!modes[i]) { + can_clone = false; + break; + } + for (j = 0; j < i; j++) { + if (!enabled[j]) + continue; + if (!drm_mode_match(modes[j], modes[i], + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + can_clone = false; + } + } + + if (can_clone) { + DRM_DEBUG_KMS("can clone using command line\n"); + return true; + } + + /* try and find a 1024x768 mode on each connector */ + can_clone = true; + dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false); + + drm_fb_helper_for_each_connector(fb_helper, i) { + if (!enabled[i]) + continue; + + fb_helper_conn = fb_helper->connector_info[i]; + list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { + if (drm_mode_match(mode, dmt_mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + modes[i] = mode; + } + if (!modes[i]) + can_clone = false; + } + + if (can_clone) { + DRM_DEBUG_KMS("can clone using 1024x768\n"); + return true; + } + DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); + return false; +} + +static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper, + struct drm_display_mode **modes, + struct drm_fb_offset *offsets, + int idx, + int h_idx, int v_idx) +{ + struct drm_fb_helper_connector *fb_helper_conn; + int i; + int hoffset = 0, voffset = 0; + + drm_fb_helper_for_each_connector(fb_helper, i) { + fb_helper_conn = fb_helper->connector_info[i]; + if (!fb_helper_conn->connector->has_tile) + continue; + + if (!modes[i] && (h_idx || v_idx)) { + DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i, + fb_helper_conn->connector->base.id); + continue; + } + if (fb_helper_conn->connector->tile_h_loc < h_idx) + hoffset += modes[i]->hdisplay; + + if (fb_helper_conn->connector->tile_v_loc < v_idx) + voffset += modes[i]->vdisplay; + } + offsets[idx].x = hoffset; + offsets[idx].y = voffset; + DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx); + return 0; +} + +static bool drm_target_preferred(struct drm_fb_helper 
*fb_helper,
+				 struct drm_display_mode **modes,
+				 struct drm_fb_offset *offsets,
+				 bool *enabled, int width, int height)
+{
+	struct drm_fb_helper_connector *fb_helper_conn;
+	const u64 mask = BIT_ULL(fb_helper->connector_count) - 1;
+	u64 conn_configured = 0;
+	int tile_pass = 0;
+	int i;
+
+retry:
+	drm_fb_helper_for_each_connector(fb_helper, i) {
+		fb_helper_conn = fb_helper->connector_info[i];
+
+		if (conn_configured & BIT_ULL(i))
+			continue;
+
+		if (enabled[i] == false) {
+			conn_configured |= BIT_ULL(i);
+			continue;
+		}
+
+		/* first pass over all the untiled connectors */
+		if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
+			continue;
+
+		if (tile_pass == 1) {
+			if (fb_helper_conn->connector->tile_h_loc != 0 ||
+			    fb_helper_conn->connector->tile_v_loc != 0)
+				continue;
+
+		} else {
+			if (fb_helper_conn->connector->tile_h_loc != tile_pass - 1 &&
+			    fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
+				/* if this tile_pass doesn't cover any of the tiles - keep going */
+				continue;
+
+			/*
+			 * find the tile offsets for this pass - need to find
+			 * all tiles left and above
+			 */
+			drm_get_tile_offsets(fb_helper, modes, offsets,
+					     i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
+		}
+		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+			      fb_helper_conn->connector->base.id);
+
+		/* go for command line mode first */
+		modes[i] = drm_pick_cmdline_mode(fb_helper_conn);
+		if (!modes[i]) {
+			DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
+				      fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
+			modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+		}
+		/* No preferred modes, pick one off the list */
+		if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+			list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+				break;
+		}
+		DRM_DEBUG_KMS("found mode %s\n", modes[i] ?
modes[i]->name : + "none"); + conn_configured |= BIT_ULL(i); + } + + if ((conn_configured & mask) != mask) { + tile_pass++; + goto retry; + } + return true; +} + +static bool connector_has_possible_crtc(struct drm_connector *connector, + struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + int i; + + drm_connector_for_each_possible_encoder(connector, encoder, i) { + if (encoder->possible_crtcs & drm_crtc_mask(crtc)) + return true; + } + + return false; +} + +static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_crtc **best_crtcs, + struct drm_display_mode **modes, + int n, int width, int height) +{ + int c, o; + struct drm_connector *connector; + int my_score, best_score, score; + struct drm_fb_helper_crtc **crtcs, *crtc; + struct drm_fb_helper_connector *fb_helper_conn; + + if (n == fb_helper->connector_count) + return 0; + + fb_helper_conn = fb_helper->connector_info[n]; + connector = fb_helper_conn->connector; + + best_crtcs[n] = NULL; + best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); + if (modes[n] == NULL) + return best_score; + + crtcs = kcalloc(fb_helper->connector_count, + sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); + if (!crtcs) + return best_score; + + my_score = 1; + if (connector->status == connector_status_connected) + my_score++; + if (drm_has_cmdline_mode(fb_helper_conn)) + my_score++; + if (drm_has_preferred_mode(fb_helper_conn, width, height)) + my_score++; + + /* + * select a crtc for this connector and then attempt to configure + * remaining connectors + */ + for (c = 0; c < fb_helper->crtc_count; c++) { + crtc = &fb_helper->crtc_info[c]; + + if (!connector_has_possible_crtc(connector, + crtc->mode_set.crtc)) + continue; + + for (o = 0; o < n; o++) + if (best_crtcs[o] == crtc) + break; + + if (o < n) { + /* ignore cloning unless only a single crtc */ + if (fb_helper->crtc_count > 1) + continue; + + if (!drm_mode_equal(modes[o], modes[n])) + continue; + } + + crtcs[n] = crtc; + memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *)); + score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, + width, height); + if (score > best_score) { + best_score = score; + memcpy(best_crtcs, crtcs, + fb_helper->connector_count * + sizeof(struct drm_fb_helper_crtc *)); + } + } + + kfree(crtcs); + return best_score; +} + +static struct drm_fb_helper_crtc * +drm_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc) +{ + int i; + + for (i = 0; i < fb_helper->crtc_count; i++) + if (fb_helper->crtc_info[i].mode_set.crtc == crtc) + return &fb_helper->crtc_info[i]; + + return NULL; +} + +/* Try to read the BIOS display configuration and use it for the initial config */ +static bool drm_fb_helper_firmware_config(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_crtc **crtcs, + struct drm_display_mode **modes, + struct drm_fb_offset *offsets, + bool *enabled, int width, int height) +{ + struct drm_device *dev = fb_helper->dev; + unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); + unsigned long conn_configured, conn_seq, mask; + int i, j; + bool *save_enabled; + bool fallback = true, ret = true; + int num_connectors_enabled = 0; + int num_connectors_detected = 0; + struct drm_modeset_acquire_ctx ctx; + + if (!drm_drv_uses_atomic_modeset(dev)) + return false; + + save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); + if (!save_enabled) + return false; + + drm_modeset_acquire_init(&ctx, 0); + + while (drm_modeset_lock_all_ctx(dev, &ctx) != 0) + 
drm_modeset_backoff(&ctx); + + memcpy(save_enabled, enabled, count); + mask = GENMASK(count - 1, 0); + conn_configured = 0; +retry: + conn_seq = conn_configured; + for (i = 0; i < count; i++) { + struct drm_fb_helper_connector *fb_conn; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_fb_helper_crtc *new_crtc; + + fb_conn = fb_helper->connector_info[i]; + connector = fb_conn->connector; + + if (conn_configured & BIT(i)) + continue; + + if (conn_seq == 0 && !connector->has_tile) + continue; + + if (connector->status == connector_status_connected) + num_connectors_detected++; + + if (!enabled[i]) { + DRM_DEBUG_KMS("connector %s not enabled, skipping\n", + connector->name); + conn_configured |= BIT(i); + continue; + } + + if (connector->force == DRM_FORCE_OFF) { + DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n", + connector->name); + enabled[i] = false; + continue; + } + + encoder = connector->state->best_encoder; + if (!encoder || WARN_ON(!connector->state->crtc)) { + if (connector->force > DRM_FORCE_OFF) + goto bail; + + DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", + connector->name); + enabled[i] = false; + conn_configured |= BIT(i); + continue; + } + + num_connectors_enabled++; + + new_crtc = drm_fb_helper_crtc(fb_helper, connector->state->crtc); + + /* + * Make sure we're not trying to drive multiple connectors + * with a single CRTC, since our cloning support may not + * match the BIOS. + */ + for (j = 0; j < count; j++) { + if (crtcs[j] == new_crtc) { + DRM_DEBUG_KMS("fallback: cloned configuration\n"); + goto bail; + } + } + + DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n", + connector->name); + + /* go for command line mode first */ + modes[i] = drm_pick_cmdline_mode(fb_conn); + + /* try for preferred next */ + if (!modes[i]) { + DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n", + connector->name, connector->has_tile); + modes[i] = drm_has_preferred_mode(fb_conn, width, + height); + } + + /* No preferred mode marked by the EDID? Are there any modes? */ + if (!modes[i] && !list_empty(&connector->modes)) { + DRM_DEBUG_KMS("using first mode listed on connector %s\n", + connector->name); + modes[i] = list_first_entry(&connector->modes, + struct drm_display_mode, + head); + } + + /* last resort: use current mode */ + if (!modes[i]) { + /* + * IMPORTANT: We want to use the adjusted mode (i.e. + * after the panel fitter upscaling) as the initial + * config, not the input mode, which is what crtc->mode + * usually contains. But since our current + * code puts a mode derived from the post-pfit timings + * into crtc->mode this works out correctly. + * + * This is crtc->mode and not crtc->state->mode for the + * fastboot check to work correctly. + */ + DRM_DEBUG_KMS("looking for current mode on connector %s\n", + connector->name); + modes[i] = &connector->state->crtc->mode; + } + crtcs[i] = new_crtc; + + DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n", + connector->name, + connector->state->crtc->base.id, + connector->state->crtc->name, + modes[i]->hdisplay, modes[i]->vdisplay, + modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : ""); + + fallback = false; + conn_configured |= BIT(i); + } + + if ((conn_configured & mask) != mask && conn_configured != conn_seq) + goto retry; + + /* + * If the BIOS didn't enable everything it could, fall back to the + * fbdev helper library's behaviour of lighting up as much as + * possible.
+ */ + if (num_connectors_enabled != num_connectors_detected && + num_connectors_enabled < dev->mode_config.num_crtc) { + DRM_DEBUG_KMS("fallback: Not all outputs enabled\n"); + DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled, + num_connectors_detected); + fallback = true; + } + + if (fallback) { +bail: + DRM_DEBUG_KMS("Not using firmware configuration\n"); + memcpy(enabled, save_enabled, count); + ret = false; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + kfree(save_enabled); + return ret; +} + +static void drm_setup_crtcs(struct drm_fb_helper *fb_helper, + u32 width, u32 height) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_fb_helper_crtc **crtcs; + struct drm_display_mode **modes; + struct drm_fb_offset *offsets; + bool *enabled; + int i; + + DRM_DEBUG_KMS("\n"); + /* prevent concurrent modification of connector_count by hotplug */ + lockdep_assert_held(&fb_helper->lock); + + crtcs = kcalloc(fb_helper->connector_count, + sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); + modes = kcalloc(fb_helper->connector_count, + sizeof(struct drm_display_mode *), GFP_KERNEL); + offsets = kcalloc(fb_helper->connector_count, + sizeof(struct drm_fb_offset), GFP_KERNEL); + enabled = kcalloc(fb_helper->connector_count, + sizeof(bool), GFP_KERNEL); + if (!crtcs || !modes || !enabled || !offsets) { + DRM_ERROR("Memory allocation failed\n"); + goto out; + } + + mutex_lock(&fb_helper->dev->mode_config.mutex); + if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0) + DRM_DEBUG_KMS("No connectors reported connected with modes\n"); + drm_enable_connectors(fb_helper, enabled); + + if (!drm_fb_helper_firmware_config(fb_helper, crtcs, modes, offsets, + enabled, width, height)) { + memset(modes, 0, fb_helper->connector_count*sizeof(modes[0])); + memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0])); + memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0])); + + if (!drm_target_cloned(fb_helper, modes, offsets, + enabled, width, height) && + !drm_target_preferred(fb_helper, modes, offsets, + enabled, width, height)) + DRM_ERROR("Unable to find initial modes\n"); + + DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", + width, height); + + drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); + } + mutex_unlock(&fb_helper->dev->mode_config.mutex); + + /* need to set the modesets up here for use later */ + /* fill out the connector<->crtc mappings into the modesets */ + for (i = 0; i < fb_helper->crtc_count; i++) + drm_fb_helper_modeset_release(fb_helper, + &fb_helper->crtc_info[i].mode_set); + + drm_fb_helper_for_each_connector(fb_helper, i) { + struct drm_display_mode *mode = modes[i]; + struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; + struct drm_fb_offset *offset = &offsets[i]; + + if (mode && fb_crtc) { + struct drm_mode_set *modeset = &fb_crtc->mode_set; + struct drm_connector *connector = + fb_helper->connector_info[i]->connector; + + DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n", + mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y); + + modeset->mode = drm_mode_duplicate(dev, mode); + drm_connector_get(connector); + modeset->connectors[modeset->num_connectors++] = connector; + modeset->x = offset->x; + modeset->y = offset->y; + } + } +out: + kfree(crtcs); + kfree(modes); + kfree(offsets); + kfree(enabled); +} + +/* + * This is a continuation of drm_setup_crtcs() that sets up anything related + * to the framebuffer. 
During initialization, drm_setup_crtcs() is called before + * the framebuffer has been allocated (fb_helper->fb and fb_helper->fbdev). + * So, any setup that touches those fields needs to be done here instead of in + * drm_setup_crtcs(). + */ +static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper) +{ +#ifdef __linux__ + struct fb_info *info = fb_helper->fbdev; +#endif + unsigned int rotation, sw_rotations = 0; + int i; + + for (i = 0; i < fb_helper->crtc_count; i++) { + struct drm_mode_set *modeset = &fb_helper->crtc_info[i].mode_set; + + if (!modeset->num_connectors) + continue; + + modeset->fb = fb_helper->fb; + + if (drm_fb_helper_panel_rotation(modeset, &rotation)) + /* Rotating in hardware, fbcon should not rotate */ + sw_rotations |= DRM_MODE_ROTATE_0; + else + sw_rotations |= rotation; + } + +#ifdef __linux__ + mutex_lock(&fb_helper->dev->mode_config.mutex); + drm_fb_helper_for_each_connector(fb_helper, i) { + struct drm_connector *connector = + fb_helper->connector_info[i]->connector; + + /* use first connected connector for the physical dimensions */ + if (connector->status == connector_status_connected) { + info->var.width = connector->display_info.width_mm; + info->var.height = connector->display_info.height_mm; + break; + } + } + mutex_unlock(&fb_helper->dev->mode_config.mutex); + + switch (sw_rotations) { + case DRM_MODE_ROTATE_0: + info->fbcon_rotate_hint = FB_ROTATE_UR; + break; + case DRM_MODE_ROTATE_90: + info->fbcon_rotate_hint = FB_ROTATE_CCW; + break; + case DRM_MODE_ROTATE_180: + info->fbcon_rotate_hint = FB_ROTATE_UD; + break; + case DRM_MODE_ROTATE_270: + info->fbcon_rotate_hint = FB_ROTATE_CW; + break; + default: + /* + * Multiple bits are set / multiple rotations requested; + * fbcon cannot handle separate rotation settings per + * output, so fall back to unrotated. + */ + info->fbcon_rotate_hint = FB_ROTATE_UR; + } +#endif /* __linux__ */ +} + +/* Note: Drops fb_helper->lock before returning. */ +static int +__drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper, + int bpp_sel) +{ + struct drm_device *dev = fb_helper->dev; + struct fb_info *info; + unsigned int width, height; + int ret; + + width = dev->mode_config.max_width; + height = dev->mode_config.max_height; + + drm_setup_crtcs(fb_helper, width, height); + ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); + if (ret < 0) { + if (ret == -EAGAIN) { + fb_helper->preferred_bpp = bpp_sel; + fb_helper->deferred_setup = true; + ret = 0; + } + mutex_unlock(&fb_helper->lock); + + return ret; + } + drm_setup_crtcs_fb(fb_helper); + + fb_helper->deferred_setup = false; + + info = fb_helper->fbdev; +#ifdef __linux__ + info->var.pixclock = 0; + /* Shamelessly allow physical address leaking to userspace */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) + if (!drm_leak_fbdev_smem) +#endif + /* don't leak any physical addresses to userspace */ + info->flags |= FBINFO_HIDE_SMEM_START; + +#elif defined(__FreeBSD__) + info->fb_video_dev = device_get_parent(fb_helper->dev->dev); + info->fb_bpp = bpp_sel; +#endif + + /* Need to drop locks to avoid recursive deadlock in + * register_framebuffer. This is ok because the only thing left to do is + * register the fbdev emulation instance in kernel_fb_helper_list.
*/ + mutex_unlock(&fb_helper->lock); + + ret = register_framebuffer(info); + if (ret < 0) + return ret; + +#ifdef __linux__ + dev_info(dev->dev, "fb%d: %s frame buffer device\n", + info->node, info->fix.id); +#endif + + mutex_lock(&kernel_fb_helper_lock); +#ifdef __linux__ + if (list_empty(&kernel_fb_helper_list)) + register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); +#endif + + list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); + mutex_unlock(&kernel_fb_helper_lock); + + return 0; +} + +/** + * drm_fb_helper_initial_config - set up a sane initial connector configuration + * @fb_helper: fb_helper device struct + * @bpp_sel: bpp value to use for the framebuffer configuration + * + * Scans the CRTCs and connectors and tries to put together an initial setup. + * At the moment, this is a cloned configuration across all heads with + * a new framebuffer object as the backing store. + * + * Note that this also registers the fbdev and so allows userspace to call into + * the driver through the fbdev interfaces. + * + * This function will call down into the &drm_fb_helper_funcs.fb_probe callback + * to let the driver allocate and initialize the fbdev info structure and the + * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided + * as a helper to set up simple default values for the fbdev info structure. + * + * HANG DEBUGGING: + * + * When you have fbcon support built-in or already loaded, this function will do + * a full modeset to set up the fbdev console. Due to locking misdesign in the + * VT/fbdev subsystem that entire modeset sequence has to be done while holding + * console_lock. Until console_unlock is called no dmesg lines will be sent out + * to consoles, not even serial console. This means when your driver crashes, + * you will see absolutely nothing else but a system stuck in this function, + * with no further output. Any kind of printk() you place within your own driver + * or in the drm core modeset code will also never show up. + * + * Standard debug practice is to run the fbcon setup without taking the + * console_lock as a hack, to be able to see backtraces and crashes on the + * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel + * cmdline option. + * + * The other option is to just disable fbdev emulation since very likely the + * first modeset from userspace will crash in the same way, and is even easier + * to debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0 + * kernel cmdline option. + * + * RETURNS: + * Zero if everything went ok, nonzero otherwise. + */ +int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) +{ + int ret; + + if (!drm_fbdev_emulation) + return 0; + + mutex_lock(&fb_helper->lock); + ret = __drm_fb_helper_initial_config_and_unlock(fb_helper, bpp_sel); + + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_initial_config); + +/** + * drm_fb_helper_hotplug_event - respond to a hotplug notification by + * probing all the outputs attached to the fb + * @fb_helper: driver-allocated fbdev helper, can be NULL + * + * Scan the connectors attached to the fb_helper and try to put together a + * setup after notification of a change in output configuration. + * + * Called at runtime, takes the mode config locks to be able to check/change the + * modeset configuration. Must be run from process context (which usually means + * either the output polling work or a work item launched from the driver's + * hotplug interrupt).
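 + *
 + * As an illustrative sketch (the foo_ names are hypothetical, not part of
 + * this patch), a driver's hotplug work item might simply forward to this
 + * helper::
 + *
 + *     static void foo_hpd_work_func(struct work_struct *work)
 + *     {
 + *         struct foo_softc *sc = container_of(work, struct foo_softc,
 + *                                             hpd_work);
 + *
 + *         drm_fb_helper_hotplug_event(sc->drm_dev.fb_helper);
 + *     }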
+ * + * Note that drivers may call this even before calling + * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows + * for a race-free fbcon setup and will make sure that the fbdev emulation will + * not miss any hotplug events. + * + * RETURNS: + * 0 on success and a non-zero error code otherwise. + */ +int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +{ + int err = 0; + + if (!drm_fbdev_emulation || !fb_helper) + return 0; + + mutex_lock(&fb_helper->lock); + if (fb_helper->deferred_setup) { + err = __drm_fb_helper_initial_config_and_unlock(fb_helper, + fb_helper->preferred_bpp); + return err; + } + + if (!fb_helper->fb || !drm_master_internal_acquire(fb_helper->dev)) { + fb_helper->delayed_hotplug = true; + mutex_unlock(&fb_helper->lock); + return err; + } + + drm_master_internal_release(fb_helper->dev); + + DRM_DEBUG_KMS("\n"); + + drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height); + drm_setup_crtcs_fb(fb_helper); + mutex_unlock(&fb_helper->lock); + + drm_fb_helper_set_par(fb_helper->fbdev); + + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_hotplug_event); + +/** + * drm_fb_helper_fbdev_setup() - Setup fbdev emulation + * @dev: DRM device + * @fb_helper: fbdev helper structure to set up + * @funcs: fbdev helper functions + * @preferred_bpp: Preferred bits per pixel for the device. + * @dev->mode_config.preferred_depth is used if this is zero. + * @max_conn_count: Maximum number of connectors. + * @dev->mode_config.num_connector is used if this is zero. + * + * This function sets up fbdev emulation and registers fbdev for access by + * userspace. If all connectors are disconnected, setup is deferred to the next + * time drm_fb_helper_hotplug_event() is called. + * The caller must provide a &drm_fb_helper_funcs->fb_probe callback + * function. + * + * Use drm_fb_helper_fbdev_teardown() to destroy the fbdev. + * + * See also: drm_fb_helper_initial_config(), drm_fbdev_generic_setup(). + * + * Returns: + * Zero on success or negative error code on failure.
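 + *
 + * As an illustrative sketch only (the foo_ names and softc layout are
 + * hypothetical), a driver would typically call this once from its attach
 + * path::
 + *
 + *     static const struct drm_fb_helper_funcs foo_fb_helper_funcs = {
 + *         .fb_probe = foo_fb_probe,
 + *     };
 + *
 + *     // in foo_attach(), after modeset init; 32 bpp, default connector count
 + *     ret = drm_fb_helper_fbdev_setup(dev, &sc->fb_helper,
 + *                                     &foo_fb_helper_funcs, 32, 0);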
+ */ +int drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count) +{ + int ret; + + if (!preferred_bpp) + preferred_bpp = dev->mode_config.preferred_depth; + if (!preferred_bpp) + preferred_bpp = 32; + + if (!max_conn_count) + max_conn_count = dev->mode_config.num_connector; + if (!max_conn_count) { + DRM_DEV_ERROR(dev->dev, "fbdev: No connectors\n"); + return -EINVAL; + } + + drm_fb_helper_prepare(dev, fb_helper, funcs); + + ret = drm_fb_helper_init(dev, fb_helper, max_conn_count); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "fbdev: Failed to initialize (ret=%d)\n", ret); + return ret; + } + + ret = drm_fb_helper_single_add_all_connectors(fb_helper); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "fbdev: Failed to add connectors (ret=%d)\n", ret); + goto err_drm_fb_helper_fini; + } + + if (!drm_drv_uses_atomic_modeset(dev)) + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "fbdev: Failed to set configuration (ret=%d)\n", ret); + goto err_drm_fb_helper_fini; + } + + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fbdev_teardown(dev); + + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_fbdev_setup); + +/** + * drm_fb_helper_fbdev_teardown - Tear down fbdev emulation + * @dev: DRM device + * + * This function unregisters fbdev if not already done and cleans up the + * associated resources including the &drm_framebuffer. + * The driver is responsible for freeing the &drm_fb_helper structure which is + * stored in &drm_device->fb_helper. Do note that this pointer has been cleared + * when this function returns. + * + * In order to support device removal/unplug while file handles are still open, + * drm_fb_helper_unregister_fbi() should be called on device removal and + * drm_fb_helper_fbdev_teardown() in the &drm_driver->release callback when + * file handles are closed. + */ +void drm_fb_helper_fbdev_teardown(struct drm_device *dev) +{ + struct drm_fb_helper *fb_helper = dev->fb_helper; + struct fb_ops *fbops = NULL; + + if (!fb_helper) + return; + + /* Unregister if it hasn't been done already */ +#ifdef __linux__ + if (fb_helper->fbdev && fb_helper->fbdev->dev) +#elif defined(__FreeBSD__) + if (fb_helper->fbdev) +#endif + drm_fb_helper_unregister_fbi(fb_helper); + +#ifdef __linux__ + if (fb_helper->fbdev && fb_helper->fbdev->fbdefio) { + fb_deferred_io_cleanup(fb_helper->fbdev); + kfree(fb_helper->fbdev->fbdefio); + fbops = fb_helper->fbdev->fbops; + } +#endif + + drm_fb_helper_fini(fb_helper); + kfree(fbops); + + if (fb_helper->fb) + drm_framebuffer_remove(fb_helper->fb); +} +EXPORT_SYMBOL(drm_fb_helper_fbdev_teardown); + +/** + * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation + * @dev: DRM device + * + * This function can be used as the &drm_driver->lastclose callback for drivers + * that only need to call drm_fb_helper_restore_fbdev_mode_unlocked(). + */ +void drm_fb_helper_lastclose(struct drm_device *dev) +{ + drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper); +} +EXPORT_SYMBOL(drm_fb_helper_lastclose); + +/** + * drm_fb_helper_output_poll_changed - DRM mode config \.output_poll_changed + * helper for fbdev emulation + * @dev: DRM device + * + * This function can be used as the + * &drm_mode_config_funcs.output_poll_changed callback for drivers that only + * need to call drm_fb_helper_hotplug_event(). 
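 + *
 + * A minimal wiring sketch (only the foo_ prefix is hypothetical)::
 + *
 + *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 + *         .fb_create = drm_gem_fb_create,
 + *         .output_poll_changed = drm_fb_helper_output_poll_changed,
 + *         .atomic_check = drm_atomic_helper_check,
 + *         .atomic_commit = drm_atomic_helper_commit,
 + *     };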
+ */ +void drm_fb_helper_output_poll_changed(struct drm_device *dev) +{ + drm_fb_helper_hotplug_event(dev->fb_helper); +} +EXPORT_SYMBOL(drm_fb_helper_output_poll_changed); + +#ifdef __linux__ +/* @user: 1=userspace, 0=fbcon */ +static int drm_fbdev_fb_open(struct fb_info *info, int user) +{ + struct drm_fb_helper *fb_helper = info->par; + + /* No need to take a ref for fbcon because it unbinds on unregister */ + if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) + return -ENODEV; + + return 0; +} + +static int drm_fbdev_fb_release(struct fb_info *info, int user) +{ + struct drm_fb_helper *fb_helper = info->par; + + if (user) + module_put(fb_helper->dev->driver->fops->owner); + + return 0; +} + +static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) +{ + struct fb_info *fbi = fb_helper->fbdev; + struct fb_ops *fbops = NULL; + void *shadow = NULL; + + if (!fb_helper->dev) + return; + + if (fbi && fbi->fbdefio) { + fb_deferred_io_cleanup(fbi); + shadow = fbi->screen_buffer; + fbops = fbi->fbops; + } + + drm_fb_helper_fini(fb_helper); + + if (shadow) { + vfree(shadow); + kfree(fbops); + } + + drm_client_framebuffer_delete(fb_helper->buffer); +} + +static void drm_fbdev_release(struct drm_fb_helper *fb_helper) +{ + drm_fbdev_cleanup(fb_helper); + drm_client_release(&fb_helper->client); + kfree(fb_helper); +} + +/* + * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of + * unregister_framebuffer() or fb_release(). + */ +static void drm_fbdev_fb_destroy(struct fb_info *info) +{ + drm_fbdev_release(info->par); +} + +static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct drm_fb_helper *fb_helper = info->par; + + if (fb_helper->dev->driver->gem_prime_mmap) + return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); + else + return -ENODEV; +} + +static struct fb_ops drm_fbdev_fb_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_open = drm_fbdev_fb_open, + .fb_release = drm_fbdev_fb_release, + .fb_destroy = drm_fbdev_fb_destroy, + .fb_mmap = drm_fbdev_fb_mmap, + .fb_read = drm_fb_helper_sys_read, + .fb_write = drm_fb_helper_sys_write, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, +}; + +static struct fb_deferred_io drm_fbdev_defio = { + .delay = HZ / 20, + .deferred_io = drm_fb_helper_deferred_io, +}; + +/** + * drm_fb_helper_generic_probe - Generic fbdev emulation probe helper + * @fb_helper: fbdev helper structure + * @sizes: describes fbdev size and scanout surface size + * + * This function uses the client API to create a framebuffer backed by a dumb buffer. + * + * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, + * fb_copyarea, fb_imageblit. + * + * Returns: + * Zero on success or negative error code on failure. 
+ */ +int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_client_dev *client = &fb_helper->client; + struct drm_client_buffer *buffer; + struct drm_framebuffer *fb; + struct fb_info *fbi; + u32 format; + + DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", + sizes->surface_width, sizes->surface_height, + sizes->surface_bpp); + + format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + buffer = drm_client_framebuffer_create(client, sizes->surface_width, + sizes->surface_height, format); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); + + fb_helper->buffer = buffer; + fb_helper->fb = buffer->fb; + fb = buffer->fb; + + fbi = drm_fb_helper_alloc_fbi(fb_helper); + if (IS_ERR(fbi)) + return PTR_ERR(fbi); + + fbi->fbops = &drm_fbdev_fb_ops; + fbi->screen_size = fb->height * fb->pitches[0]; + fbi->fix.smem_len = fbi->screen_size; + fbi->screen_buffer = buffer->vaddr; + /* Shamelessly leak the physical address to user-space */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) + if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) + fbi->fix.smem_start = + page_to_phys(virt_to_page(fbi->screen_buffer)); +#endif + drm_fb_helper_fill_info(fbi, fb_helper, sizes); + + if (fb->funcs->dirty) { + struct fb_ops *fbops; + void *shadow; + + /* + * fb_deferred_io_cleanup() clears &fbops->fb_mmap so a per + * instance version is necessary. + */ + fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); + shadow = vzalloc(fbi->screen_size); + if (!fbops || !shadow) { + kfree(fbops); + vfree(shadow); + return -ENOMEM; + } + + *fbops = *fbi->fbops; + fbi->fbops = fbops; + fbi->screen_buffer = shadow; + fbi->fbdefio = &drm_fbdev_defio; + + fb_deferred_io_init(fbi); + } + + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_generic_probe); + +static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { + .fb_probe = drm_fb_helper_generic_probe, +}; + +static void drm_fbdev_client_unregister(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (fb_helper->fbdev) + /* drm_fbdev_fb_destroy() takes care of cleanup */ + drm_fb_helper_unregister_fbi(fb_helper); + else + drm_fbdev_release(fb_helper); +} + +static int drm_fbdev_client_restore(struct drm_client_dev *client) +{ + drm_fb_helper_lastclose(client->dev); + + return 0; +} + +static int drm_fbdev_client_hotplug(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = client->dev; + int ret; + + /* Setup is not retried if it has failed */ + if (!fb_helper->dev && fb_helper->funcs) + return 0; + + if (dev->fb_helper) + return drm_fb_helper_hotplug_event(dev->fb_helper); + + if (!dev->mode_config.num_connector) { + DRM_DEV_DEBUG(dev->dev, "No connectors found, will not create framebuffer!\n"); + return 0; + } + + drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs); + + ret = drm_fb_helper_init(dev, fb_helper, dev->mode_config.num_connector); + if (ret) + goto err; + + ret = drm_fb_helper_single_add_all_connectors(fb_helper); + if (ret) + goto err_cleanup; + + if (!drm_drv_uses_atomic_modeset(dev)) + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp); + if (ret) + goto err_cleanup; + + return 0; + +err_cleanup: + drm_fbdev_cleanup(fb_helper); +err: + fb_helper->dev = NULL; + fb_helper->fbdev = NULL; + + DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic 
emulation (ret=%d)\n", ret); + + return ret; +} + +static const struct drm_client_funcs drm_fbdev_client_funcs = { + .owner = THIS_MODULE, + .unregister = drm_fbdev_client_unregister, + .restore = drm_fbdev_client_restore, + .hotplug = drm_fbdev_client_hotplug, +}; + +/** + * drm_fbdev_generic_setup() - Setup generic fbdev emulation + * @dev: DRM device + * @preferred_bpp: Preferred bits per pixel for the device. + * @dev->mode_config.preferred_depth is used if this is zero. + * + * This function sets up generic fbdev emulation for drivers that support + * dumb buffers with a virtual address and that can be mmap'ed. If the driver + * does not support these functions, it could use drm_fb_helper_fbdev_setup(). + * + * Restore, hotplug events and teardown are all taken care of. Drivers that do + * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. + * Simple drivers might use drm_mode_config_helper_suspend(). + * + * Drivers that set the dirty callback on their framebuffer will get a shadow + * fbdev buffer that is blitted onto the real buffer. This is done in order to + * make deferred I/O work with all kinds of buffers. + * + * This function is safe to call even when there are no connectors present. + * Setup will be retried on the next hotplug event. + * + * The fbdev is destroyed by drm_dev_unregister(). + * + * Returns: + * Zero on success or negative error code on failure. + */ +int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) +{ + struct drm_fb_helper *fb_helper; + int ret; + + WARN(dev->fb_helper, "fb_helper is already set!\n"); + + if (!drm_fbdev_emulation) + return 0; + + fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); + if (!fb_helper) + return -ENOMEM; + + ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); + if (ret) { + kfree(fb_helper); + DRM_DEV_ERROR(dev->dev, "Failed to register client: %d\n", ret); + return ret; + } + + if (!preferred_bpp) + preferred_bpp = dev->mode_config.preferred_depth; + if (!preferred_bpp) + preferred_bpp = 32; + fb_helper->preferred_bpp = preferred_bpp; + + ret = drm_fbdev_client_hotplug(&fb_helper->client); + if (ret) + DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret); + + drm_client_register(&fb_helper->client); + + return 0; +} +EXPORT_SYMBOL(drm_fbdev_generic_setup); +#endif /* __linux__ */ + +/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT) + * but the module doesn't depend on any fb console symbols. At least + * attempt to load fbcon to avoid leaving the system without a usable console. + */ +int __init drm_fb_helper_modinit(void) +{ +#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT) + const char name[] = "fbcon"; + struct module *fbcon; + + mutex_lock(&module_mutex); + fbcon = find_module(name); + mutex_unlock(&module_mutex); + + if (!fbcon) + request_module_nowait(name); +#endif + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_modinit); Index: sys/dev/drm/core/drm_file.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_file.c @@ -0,0 +1,789 @@ +/* + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Daryll Strauss <daryll@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + */ + +/* + * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/dma-fence.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/slab.h> + +#include <drm/drm_client.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_print.h> + +#include "drm_crtc_internal.h" +#include "drm_internal.h" +#include "drm_legacy.h" + +/* from BKL pushdown */ +DEFINE_MUTEX(drm_global_mutex); + +/** + * DOC: file operations + * + * Drivers must define the file operations structure that forms the DRM + * userspace API entry point, even though most of those operations are + * implemented in the DRM core. The resulting &struct file_operations must be + * stored in the &drm_driver.fops field. The mandatory functions are drm_open(), + * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled. + * Note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n, so there's no + * need to sprinkle #ifdef into the code. Drivers which implement private ioctls + * that require 32/64 bit compatibility support must provide their own + * &file_operations.compat_ioctl handler that processes private ioctls and calls + * drm_compat_ioctl() for core ioctls. + * + * In addition drm_read() and drm_poll() provide support for DRM events. DRM + * events are a generic and extensible means to send asynchronous events to + * userspace through the file descriptor. They are used to send vblank event and + * page flip completions by the KMS API. But drivers can also use it for their + * own needs, e.g. to signal completion of rendering. + * + * For the driver-side event interface see drm_event_reserve_init() and + * drm_send_event() as the main starting points. + * + * The memory mapping implementation will vary depending on how the driver + * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap() + * function, modern drivers should use one of the provided memory-manager + * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and + * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap(). + * + * No other file operations are supported by the DRM userspace API.
Overall the + * following is an example &file_operations structure:: + * + * static const struct file_operations example_drm_fops = { + * .owner = THIS_MODULE, + * .open = drm_open, + * .release = drm_release, + * .unlocked_ioctl = drm_ioctl, + * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n + * .poll = drm_poll, + * .read = drm_read, + * .llseek = no_llseek, + * .mmap = drm_gem_mmap, + * }; + * + * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for + * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this + * simpler. + * + * The driver's &file_operations must be stored in &drm_driver.fops. + * + * For driver-private IOCTL handling see the more detailed discussion in + * :ref:`IOCTL support in the userland interfaces chapter`. + */ + +/** + * drm_file_alloc - allocate file context + * @minor: minor to allocate on + * + * This allocates a new DRM file context. It is not linked into any context and + * can be used by the caller freely. Note that the context keeps a pointer to + * @minor, so it must be freed before @minor is. + * + * RETURNS: + * Pointer to newly allocated context, ERR_PTR on failure. + */ +struct drm_file *drm_file_alloc(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct drm_file *file; + int ret; + + file = kzalloc(sizeof(*file), GFP_KERNEL); + if (!file) + return ERR_PTR(-ENOMEM); + + file->pid = get_pid(task_pid(current)); + file->minor = minor; + + /* for compatibility root is always authenticated */ + file->authenticated = capable(CAP_SYS_ADMIN); + + INIT_LIST_HEAD(&file->lhead); + INIT_LIST_HEAD(&file->fbs); + mutex_init(&file->fbs_lock); + INIT_LIST_HEAD(&file->blobs); + INIT_LIST_HEAD(&file->pending_event_list); + INIT_LIST_HEAD(&file->event_list); + init_waitqueue_head(&file->event_wait); + file->event_space = 4096; /* set aside 4k for event buffer */ + + mutex_init(&file->event_read_lock); + + if (drm_core_check_feature(dev, DRIVER_GEM)) + drm_gem_open(dev, file); + + if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_open(file); + + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_prime_init_file_private(&file->prime); + + if (dev->driver->open) { + ret = dev->driver->open(dev, file); + if (ret < 0) + goto out_prime_destroy; + } + + return file; + +out_prime_destroy: + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_prime_destroy_file_private(&file->prime); + if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_release(file); + if (drm_core_check_feature(dev, DRIVER_GEM)) + drm_gem_release(dev, file); + put_pid(file->pid); + kfree(file); + + return ERR_PTR(ret); +} + +static void drm_events_release(struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_pending_event *e, *et; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + + /* Unlink pending events */ + list_for_each_entry_safe(e, et, &file_priv->pending_event_list, + pending_link) { + list_del(&e->pending_link); + e->file_priv = NULL; + } + + /* Remove unconsumed events */ + list_for_each_entry_safe(e, et, &file_priv->event_list, link) { + list_del(&e->link); + kfree(e); + } + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +/** + * drm_file_free - free file context + * @file: context to free, or NULL + * + * This destroys and deallocates a DRM file context previously allocated via + * drm_file_alloc(). The caller must make sure to unlink it from any contexts + * before calling this. + * + * If NULL is passed, this is a no-op.
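 + *
 + * A brief sketch of the pairing with drm_file_alloc() (error handling
 + * shortened for illustration)::
 + *
 + *     struct drm_file *file = drm_file_alloc(minor);
 + *
 + *     if (IS_ERR(file))
 + *         return PTR_ERR(file);
 + *     // ... use the context; once it is unlinked from all lists:
 + *     drm_file_free(file);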
+ */ +void drm_file_free(struct drm_file *file) +{ + struct drm_device *dev; + + if (!file) + return; + + dev = file->minor->dev; + +#ifdef __linux__ + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", + task_pid_nr(current), + (long)old_encode_dev(file->minor->kdev->devt), + dev->open_count); +#endif + + if (drm_core_check_feature(dev, DRIVER_LEGACY) && + dev->driver->preclose) + dev->driver->preclose(dev, file); + + if (drm_core_check_feature(dev, DRIVER_LEGACY)) + drm_legacy_lock_release(dev, file->filp); + + if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + drm_legacy_reclaim_buffers(dev, file); + + drm_events_release(file); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + drm_fb_release(file); + drm_property_destroy_user_blobs(dev, file); + } + + if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_release(file); + + if (drm_core_check_feature(dev, DRIVER_GEM)) + drm_gem_release(dev, file); + + drm_legacy_ctxbitmap_flush(dev, file); + + if (drm_is_primary_client(file)) + drm_master_release(file); + + if (dev->driver->postclose) + dev->driver->postclose(dev, file); + + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_prime_destroy_file_private(&file->prime); + + WARN_ON(!list_empty(&file->event_list)); + + put_pid(file->pid); + kfree(file); +} + +static void drm_close_helper(struct file *filp) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->minor->dev; + + mutex_lock(&dev->filelist_mutex); + list_del(&file_priv->lhead); + mutex_unlock(&dev->filelist_mutex); + + drm_file_free(file_priv); +} + +/* + * Check whether DRI will run on this CPU. + * + * \return non-zero if the DRI will run on this CPU, or zero otherwise. + */ +static int drm_cpu_valid(void) +{ +#if defined(__sparc__) && !defined(__sparc_v9__) + return 0; /* No cmpxchg before v9 sparc. */ +#endif + return 1; +} + +/* + * Called whenever a process opens /dev/drm. + * + * \param filp file pointer. + * \param minor acquired minor-object. + * \return zero on success or a negative number on failure. + * + * Creates and initializes a drm_file structure for the file private data in \p + * filp and adds it into the doubly linked list in \p dev.
+ */ +static int drm_open_helper(struct file *filp, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct drm_file *priv; + int ret; + + if (filp->f_flags & O_EXCL) + return -EBUSY; /* No exclusive opens */ + if (!drm_cpu_valid()) + return -EINVAL; + if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) + return -EINVAL; + + DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index); + + priv = drm_file_alloc(minor); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + if (drm_is_primary_client(priv)) { + ret = drm_master_open(priv); + if (ret) { + drm_file_free(priv); + return ret; + } + } + + filp->private_data = priv; +#ifdef __linux__ + filp->f_mode |= FMODE_UNSIGNED_OFFSET; +#endif + priv->filp = filp; + + mutex_lock(&dev->filelist_mutex); + list_add(&priv->lhead, &dev->filelist); + mutex_unlock(&dev->filelist_mutex); + +#ifdef __alpha__ + /* + * Default the hose + */ + if (!dev->hose) { + struct pci_dev *pci_dev; + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); + if (pci_dev) { + dev->hose = pci_dev->sysdata; + pci_dev_put(pci_dev); + } + if (!dev->hose) { + struct pci_bus *b = list_entry(pci_root_buses.next, + struct pci_bus, node); + if (b) + dev->hose = b->sysdata; + } + } +#endif + + return 0; +} + +/** + * drm_open - open method for DRM file + * @inode: device inode + * @filp: file pointer. + * + * This function must be used by drivers as their &file_operations.open method. + * It looks up the correct DRM device and instantiates all the per-file + * resources for it. It also calls the &drm_driver.open driver callback. + * + * RETURNS: + * + * 0 on success or negative errno value on failure. + */ +int drm_open(struct inode *inode, struct file *filp) +{ + struct drm_device *dev; + struct drm_minor *minor; + int retcode; + int need_setup = 0; + + minor = drm_minor_acquire(iminor(inode)); + if (IS_ERR(minor)) + return PTR_ERR(minor); + + dev = minor->dev; + if (!dev->open_count++) + need_setup = 1; + + /* share address_space across all char-devs of a single device */ +#ifdef __linux__ + filp->f_mapping = dev->anon_inode->i_mapping; +#endif + + retcode = drm_open_helper(filp, minor); + if (retcode) + goto err_undo; + if (need_setup) { + retcode = drm_legacy_setup(dev); + if (retcode) { + drm_close_helper(filp); + goto err_undo; + } + } + return 0; + +err_undo: + dev->open_count--; + drm_minor_release(minor); + return retcode; +} +EXPORT_SYMBOL(drm_open); + +void drm_lastclose(struct drm_device * dev) +{ + DRM_DEBUG("\n"); + + if (dev->driver->lastclose) + dev->driver->lastclose(dev); + DRM_DEBUG("driver lastclose completed\n"); + + if (drm_core_check_feature(dev, DRIVER_LEGACY)) + drm_legacy_dev_reinit(dev); + + drm_client_dev_restore(dev); +} + +/** + * drm_release - release method for DRM file + * @inode: device inode + * @filp: file pointer. + * + * This function must be used by drivers as their &file_operations.release + * method. It frees any resources associated with the open file, and calls the + * &drm_driver.postclose driver callback. If this is the last open file for the + * DRM device also proceeds to call the &drm_driver.lastclose driver callback. + * + * RETURNS: + * + * Always succeeds and returns 0.
+ */ +int drm_release(struct inode *inode, struct file *filp) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_minor *minor = file_priv->minor; + struct drm_device *dev = minor->dev; + + mutex_lock(&drm_global_mutex); + + DRM_DEBUG("open_count = %d\n", dev->open_count); + + drm_close_helper(filp); + + if (!--dev->open_count) + drm_lastclose(dev); + + mutex_unlock(&drm_global_mutex); + + drm_minor_release(minor); + + return 0; +} +EXPORT_SYMBOL(drm_release); + +/** + * drm_read - read method for DRM file + * @filp: file pointer + * @buffer: userspace destination pointer for the read + * @count: count in bytes to read + * @offset: offset to read + * + * This function must be used by drivers as their &file_operations.read + * method iff they use DRM events for asynchronous signalling to userspace. + * Since events are used by the KMS API for vblank and page flip completion this + * means all modern display drivers must use it. + * + * @offset is ignored, DRM events are read like a pipe. Therefore drivers also + * must set the &file_operations.llseek to no_llseek(). Polling support is + * provided by drm_poll(). + * + * This function will only ever read a full event. Therefore userspace must + * supply a big enough buffer to fit any event to ensure forward progress. Since + * the maximum event space is currently 4K it's recommended to just use that for + * safety. + * + * RETURNS: + * + * Number of bytes read (always aligned to full events, and can be 0) or a + * negative error code on failure. + */ +ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->minor->dev; + ssize_t ret; + + if (!access_ok(buffer, count)) + return -EFAULT; + + ret = mutex_lock_interruptible(&file_priv->event_read_lock); + if (ret) + return ret; + + for (;;) { + struct drm_pending_event *e = NULL; + + spin_lock_irq(&dev->event_lock); + if (!list_empty(&file_priv->event_list)) { + e = list_first_entry(&file_priv->event_list, + struct drm_pending_event, link); + file_priv->event_space += e->event->length; + list_del(&e->link); + } + spin_unlock_irq(&dev->event_lock); + + if (e == NULL) { + if (ret) + break; + + if (filp->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + mutex_unlock(&file_priv->event_read_lock); + ret = wait_event_interruptible(file_priv->event_wait, + !list_empty(&file_priv->event_list)); + if (ret >= 0) + ret = mutex_lock_interruptible(&file_priv->event_read_lock); + if (ret) + return ret; + } else { + unsigned length = e->event->length; + + if (length > count - ret) { +put_back_event: + spin_lock_irq(&dev->event_lock); + file_priv->event_space -= length; + list_add(&e->link, &file_priv->event_list); + spin_unlock_irq(&dev->event_lock); + wake_up_interruptible(&file_priv->event_wait); + break; + } + + if (copy_to_user(buffer + ret, e->event, length)) { + if (ret == 0) + ret = -EFAULT; + goto put_back_event; + } + + ret += length; + kfree(e); + } + } + mutex_unlock(&file_priv->event_read_lock); + + return ret; +} +EXPORT_SYMBOL(drm_read); + +/** + * drm_poll - poll method for DRM file + * @filp: file pointer + * @wait: poll waiter table + * + * This function must be used by drivers as their &file_operations.read method + * iff they use DRM events for asynchronous signalling to userspace. Since + * events are used by the KMS API for vblank and page flip completion this means + * all modern display drivers must use it. + * + * See also drm_read().
+ * + * RETURNS: + * + * Mask of POLL flags indicating the current status of the file. + */ +__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait) +{ +#ifdef __linux__ + struct drm_file *file_priv = filp->private_data; + __poll_t mask = 0; + + poll_wait(filp, &file_priv->event_wait, wait); + + if (!list_empty(&file_priv->event_list)) + mask |= EPOLLIN | EPOLLRDNORM; + + return mask; +#elif defined(__FreeBSD__) + struct drm_file *file_priv; + struct drm_device *dev; + unsigned long irqflags; + int revents; + + file_priv = filp->private_data; + dev = file_priv->minor->dev; + + revents = 0; + spin_lock_irqsave(&dev->event_lock, irqflags); + if (list_empty(&file_priv->event_list)) + selrecord(curthread, &file_priv->event_poll); + else + revents = POLLIN | POLLRDNORM; + spin_unlock_irqrestore(&dev->event_lock, irqflags); + + return (revents); +#endif +} +EXPORT_SYMBOL(drm_poll); + +/** + * drm_event_reserve_init_locked - init a DRM event and reserve space for it + * @dev: DRM device + * @file_priv: DRM file private data + * @p: tracking structure for the pending event + * @e: actual event data to deliver to userspace + * + * This function prepares the passed in event for eventual delivery. If the event + * doesn't get delivered (because the IOCTL fails later on, before queuing up + * anything) then the event must be cancelled and freed using + * drm_event_cancel_free(). Successfully initialized events should be sent out + * using drm_send_event() or drm_send_event_locked() to signal completion of the + * asynchronous event to userspace. + * + * If callers embedded @p into a larger structure it must be allocated with + * kmalloc and @p must be the first member element. + * + * This is the locked version of drm_event_reserve_init() for callers which + * already hold &drm_device.event_lock. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ +int drm_event_reserve_init_locked(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e) +{ + if (file_priv->event_space < e->length) + return -ENOMEM; + + file_priv->event_space -= e->length; + + p->event = e; + list_add(&p->pending_link, &file_priv->pending_event_list); + p->file_priv = file_priv; + + return 0; +} +EXPORT_SYMBOL(drm_event_reserve_init_locked); + +/** + * drm_event_reserve_init - init a DRM event and reserve space for it + * @dev: DRM device + * @file_priv: DRM file private data + * @p: tracking structure for the pending event + * @e: actual event data to deliver to userspace + * + * This function prepares the passed in event for eventual delivery. If the event + * doesn't get delivered (because the IOCTL fails later on, before queuing up + * anything) then the event must be cancelled and freed using + * drm_event_cancel_free(). Successfully initialized events should be sent out + * using drm_send_event() or drm_send_event_locked() to signal completion of the + * asynchronous event to userspace. + * + * If callers embedded @p into a larger structure it must be allocated with + * kmalloc and @p must be the first member element. + * + * Callers which already hold &drm_device.event_lock should use + * drm_event_reserve_init_locked() instead. + * + * RETURNS: + * + * 0 on success or a negative error code on failure.
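 + *
 + * A hedged sketch of the whole event protocol (foo_event is a hypothetical
 + * driver structure embedding struct drm_pending_event as member "base" and
 + * the userspace-visible struct drm_event inside "event")::
 + *
 + *     ret = drm_event_reserve_init(dev, file_priv, &foo_event->base,
 + *                                  &foo_event->event.base);
 + *     if (ret)
 + *         return ret;
 + *
 + *     // later, when the asynchronous operation completes:
 + *     drm_send_event(dev, &foo_event->base);
 + *     // or, if the ioctl fails before anything was queued:
 + *     drm_event_cancel_free(dev, &foo_event->base);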
+ */ +int drm_event_reserve_init(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&dev->event_lock, flags); + ret = drm_event_reserve_init_locked(dev, file_priv, p, e); + spin_unlock_irqrestore(&dev->event_lock, flags); + + return ret; +} +EXPORT_SYMBOL(drm_event_reserve_init); + +/** + * drm_event_cancel_free - free a DRM event and release its space + * @dev: DRM device + * @p: tracking structure for the pending event + * + * This function frees the event @p initialized with drm_event_reserve_init() + * and releases any allocated space. It is used to cancel an event when the + * nonblocking operation could not be submitted and needed to be aborted. + */ +void drm_event_cancel_free(struct drm_device *dev, + struct drm_pending_event *p) +{ + unsigned long flags; + spin_lock_irqsave(&dev->event_lock, flags); + if (p->file_priv) { + p->file_priv->event_space += p->event->length; + list_del(&p->pending_link); + } + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (p->fence) + dma_fence_put(p->fence); + + kfree(p); +} +EXPORT_SYMBOL(drm_event_cancel_free); + +/** + * drm_send_event_locked - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. Callers must already hold + * &drm_device.event_lock, see drm_send_event() for the unlocked version. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. + */ +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) +{ + assert_spin_locked(&dev->event_lock); + + if (e->completion) { + complete_all(e->completion); + e->completion_release(e->completion); + e->completion = NULL; + } + + if (e->fence) { + dma_fence_signal(e->fence); + dma_fence_put(e->fence); + } + + if (!e->file_priv) { + kfree(e); + return; + } + + list_del(&e->pending_link); + list_add_tail(&e->link, + &e->file_priv->event_list); + wake_up_interruptible(&e->file_priv->event_wait); +#ifdef __FreeBSD__ + selwakeup(&e->file_priv->event_poll); +#endif + +} +EXPORT_SYMBOL(drm_send_event_locked); + +/** + * drm_send_event - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. This function acquires + * &drm_device.event_lock, see drm_send_event_locked() for callers which already + * hold this lock. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. 
+ */ +void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev->event_lock, irqflags); + drm_send_event_locked(dev, e); + spin_unlock_irqrestore(&dev->event_lock, irqflags); +} +EXPORT_SYMBOL(drm_send_event); Index: sys/dev/drm/core/drm_flip_work.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_flip_work.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2013 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/export.h> +/* FBSD: Linux header pollution, we need list.h */ +#include <linux/list.h> + +#include <drm/drm_flip_work.h> +#include <drm/drm_print.h> +#include <drm/drm_util.h> + +/** + * drm_flip_work_allocate_task - allocate a flip-work task + * @data: data associated to the task + * @flags: allocator flags + * + * Allocate a drm_flip_task object and attach private data to it. + */ +struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags) +{ + struct drm_flip_task *task; + + task = kzalloc(sizeof(*task), flags); + if (task) + task->data = data; + + return task; +} +EXPORT_SYMBOL(drm_flip_work_allocate_task); + +/** + * drm_flip_work_queue_task - queue a specific task + * @work: the flip-work + * @task: the task to handle + * + * Queues a task that will later be run (passed back to drm_flip_func_t + * func) on a work queue after drm_flip_work_commit() is called. + */ +void drm_flip_work_queue_task(struct drm_flip_work *work, + struct drm_flip_task *task) +{ + unsigned long flags; + + spin_lock_irqsave(&work->lock, flags); + list_add_tail(&task->node, &work->queued); + spin_unlock_irqrestore(&work->lock, flags); +} +EXPORT_SYMBOL(drm_flip_work_queue_task); + +/** + * drm_flip_work_queue - queue work + * @work: the flip-work + * @val: the value to queue + * + * Queues work that will later be run (passed back to drm_flip_func_t + * func) on a work queue after drm_flip_work_commit() is called. + */ +void drm_flip_work_queue(struct drm_flip_work *work, void *val) +{ + struct drm_flip_task *task; + + task = drm_flip_work_allocate_task(val, + drm_can_sleep() ?
GFP_KERNEL : GFP_ATOMIC); + if (task) { + drm_flip_work_queue_task(work, task); + } else { + DRM_ERROR("%s could not allocate task!\n", work->name); + work->func(work, val); + } +} +EXPORT_SYMBOL(drm_flip_work_queue); + +/** + * drm_flip_work_commit - commit queued work + * @work: the flip-work + * @wq: the work-queue to run the queued work on + * + * Trigger work previously queued by drm_flip_work_queue() to run + * on a workqueue. The typical usage would be to queue work (via + * drm_flip_work_queue()) at any point (from vblank irq and/or + * prior), and then from vblank irq commit the queued work. + */ +void drm_flip_work_commit(struct drm_flip_work *work, + struct workqueue_struct *wq) +{ + unsigned long flags; + + spin_lock_irqsave(&work->lock, flags); + list_splice_tail(&work->queued, &work->commited); + INIT_LIST_HEAD(&work->queued); + spin_unlock_irqrestore(&work->lock, flags); + queue_work(wq, &work->worker); +} +EXPORT_SYMBOL(drm_flip_work_commit); + +static void flip_worker(struct work_struct *w) +{ + struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker); + struct list_head tasks; + unsigned long flags; + + while (1) { + struct drm_flip_task *task, *tmp; + + INIT_LIST_HEAD(&tasks); + spin_lock_irqsave(&work->lock, flags); + list_splice_tail(&work->commited, &tasks); + INIT_LIST_HEAD(&work->commited); + spin_unlock_irqrestore(&work->lock, flags); + + if (list_empty(&tasks)) + break; + + list_for_each_entry_safe(task, tmp, &tasks, node) { + work->func(work, task->data); + kfree(task); + } + } +} + +/** + * drm_flip_work_init - initialize flip-work + * @work: the flip-work to initialize + * @name: debug name + * @func: the callback work function + * + * Initializes/allocates resources for the flip-work + */ +void drm_flip_work_init(struct drm_flip_work *work, + const char *name, drm_flip_func_t func) +{ + work->name = name; + INIT_LIST_HEAD(&work->queued); + INIT_LIST_HEAD(&work->commited); + spin_lock_init(&work->lock); + work->func = func; + + INIT_WORK(&work->worker, flip_worker); +} +EXPORT_SYMBOL(drm_flip_work_init); + +/** + * drm_flip_work_cleanup - cleans up flip-work + * @work: the flip-work to cleanup + * + * Destroy resources allocated for the flip-work + */ +void drm_flip_work_cleanup(struct drm_flip_work *work) +{ + WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited)); +} +EXPORT_SYMBOL(drm_flip_work_cleanup); Index: sys/dev/drm/core/drm_fourcc.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_fourcc.c @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2016 Laurent Pinchart + * + * DRM core format related functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
+
+static char printable_char(int c)
+{
+	return isascii(c) && isprint(c) ? c : '?';
+}
+
+/**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixel
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+	uint32_t fmt = DRM_FORMAT_INVALID;
+
+	switch (bpp) {
+	case 8:
+		if (depth == 8)
+			fmt = DRM_FORMAT_C8;
+		break;
+
+	case 16:
+		switch (depth) {
+		case 15:
+			fmt = DRM_FORMAT_XRGB1555;
+			break;
+		case 16:
+			fmt = DRM_FORMAT_RGB565;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	case 24:
+		if (depth == 24)
+			fmt = DRM_FORMAT_RGB888;
+		break;
+
+	case 32:
+		switch (depth) {
+		case 24:
+			fmt = DRM_FORMAT_XRGB8888;
+			break;
+		case 30:
+			fmt = DRM_FORMAT_XRGB2101010;
+			break;
+		case 32:
+			fmt = DRM_FORMAT_ARGB8888;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
+/**
+ * drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
+ * @dev: DRM device
+ * @bpp: bits per pixel
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Unlike drm_mode_legacy_fb_format() this looks at the driver's mode_config,
+ * and depending on the &drm_mode_config.quirk_addfb_prefer_host_byte_order flag
+ * it returns little endian byte order or host byte order framebuffer formats.
+ */
+uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
+				     uint32_t bpp, uint32_t depth)
+{
+	uint32_t fmt = drm_mode_legacy_fb_format(bpp, depth);
+
+	if (dev->mode_config.quirk_addfb_prefer_host_byte_order) {
+		if (fmt == DRM_FORMAT_XRGB8888)
+			fmt = DRM_FORMAT_HOST_XRGB8888;
+		if (fmt == DRM_FORMAT_ARGB8888)
+			fmt = DRM_FORMAT_HOST_ARGB8888;
+		if (fmt == DRM_FORMAT_RGB565)
+			fmt = DRM_FORMAT_HOST_RGB565;
+		if (fmt == DRM_FORMAT_XRGB1555)
+			fmt = DRM_FORMAT_HOST_XRGB1555;
+	}
+
+	if (dev->mode_config.quirk_addfb_prefer_xbgr_30bpp &&
+	    fmt == DRM_FORMAT_XRGB2101010)
+		fmt = DRM_FORMAT_XBGR2101010;
+
+	return fmt;
+}
+EXPORT_SYMBOL(drm_driver_legacy_fb_format);
+
+/**
+ * drm_get_format_name - fill a string with a drm fourcc format's name
+ * @format: format to compute name of
+ * @buf: caller-supplied buffer
+ */
+const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf)
+{
+	snprintf(buf->str, sizeof(buf->str),
+		 "%c%c%c%c %s-endian (0x%08x)",
+		 printable_char(format & 0xff),
+		 printable_char((format >> 8) & 0xff),
+		 printable_char((format >> 16) & 0xff),
+		 printable_char((format >> 24) & 0x7f),
+		 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+		 format);
+
+	return buf->str;
+}
+EXPORT_SYMBOL(drm_get_format_name);
+
+/*
+ * Internal function to query information for a given format. See
+ * drm_format_info() for the public API.
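+ *
+ * A lookup through the public API looks roughly like this (a sketch):
+ *
+ *	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
+ *
+ * For XRGB8888 the table below yields info->num_planes == 1 and
+ * info->cpp[0] == 4.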
+ */ +const struct drm_format_info *__drm_format_info(u32 format) +{ + static const struct drm_format_info formats[] = { + { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_RGB565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_BGR565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 
}, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true }, + { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true }, + { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVU422, .depth = 0, 
.num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_Y210, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1, + .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, + .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_X0L0, .depth = 0, .num_planes = 1, + .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_Y0L2, .depth = 0, .num_planes = 1, + .char_per_block = { 8, 0, 0 }, 
.block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, + .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_X0L2, .depth = 0, .num_planes = 1, + .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P010, .depth = 0, .num_planes = 2, + .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true}, + { .format = DRM_FORMAT_P012, .depth = 0, .num_planes = 2, + .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true}, + { .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2, + .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true}, + { .format = DRM_FORMAT_P210, .depth = 0, + .num_planes = 2, .char_per_block = { 2, 4, 0 }, + .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2, + .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VUY101010, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1, + .is_yuv = true }, + { .format = DRM_FORMAT_YUV420_8BIT, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, + .is_yuv = true }, + { .format = DRM_FORMAT_YUV420_10BIT, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, + .is_yuv = true }, + }; + + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(formats); ++i) { + if (formats[i].format == format) + return &formats[i]; + } + + return NULL; +} + +/** + * drm_format_info - query information for a given format + * @format: pixel format (DRM_FORMAT_*) + * + * The caller should only pass a supported pixel format to this function. + * Unsupported pixel formats will generate a warning in the kernel log. + * + * Returns: + * The instance of struct drm_format_info that describes the pixel format, or + * NULL if the format is unsupported. + */ +const struct drm_format_info *drm_format_info(u32 format) +{ + const struct drm_format_info *info; + + info = __drm_format_info(format); + WARN_ON(!info); + return info; +} +EXPORT_SYMBOL(drm_format_info); + +/** + * drm_get_format_info - query information for a given framebuffer configuration + * @dev: DRM device + * @mode_cmd: metadata from the userspace fb creation request + * + * Returns: + * The instance of struct drm_format_info that describes the pixel format, or + * NULL if the format is unsupported. + */ +const struct drm_format_info * +drm_get_format_info(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + const struct drm_format_info *info = NULL; + + if (dev->mode_config.funcs->get_format_info) + info = dev->mode_config.funcs->get_format_info(mode_cmd); + + if (!info) + info = drm_format_info(mode_cmd->pixel_format); + + return info; +} +EXPORT_SYMBOL(drm_get_format_info); + +/** + * drm_format_info_block_width - width in pixels of block. + * @info: pixel format info + * @plane: plane index + * + * Returns: + * The width in pixels of a block, depending on the plane index. 
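+ *
+ * For linear formats .block_w is left zero in the table above and this
+ * returns 1; for a block format such as DRM_FORMAT_X0L2, which uses 2x2
+ * pixel blocks, it returns 2 for plane 0.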
+ */
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+					 int plane)
+{
+	if (!info || plane < 0 || plane >= info->num_planes)
+		return 0;
+
+	if (!info->block_w[plane])
+		return 1;
+	return info->block_w[plane];
+}
+EXPORT_SYMBOL(drm_format_info_block_width);
+
+/**
+ * drm_format_info_block_height - height in pixels of a block
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The height in pixels of a block, depending on the plane index.
+ */
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+					  int plane)
+{
+	if (!info || plane < 0 || plane >= info->num_planes)
+		return 0;
+
+	if (!info->block_h[plane])
+		return 1;
+	return info->block_h[plane];
+}
+EXPORT_SYMBOL(drm_format_info_block_height);
+
+/**
+ * drm_format_info_min_pitch - computes the minimum required pitch in bytes
+ * @info: pixel format info
+ * @plane: plane index
+ * @buffer_width: buffer width in pixels
+ *
+ * Returns:
+ * The minimum required pitch in bytes for a buffer, taking into consideration
+ * the pixel format information and the buffer width.
+ */
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+				   int plane, unsigned int buffer_width)
+{
+	if (!info || plane < 0 || plane >= info->num_planes)
+		return 0;
+
+	return DIV_ROUND_UP_ULL((u64)buffer_width * info->char_per_block[plane],
+				drm_format_info_block_width(info, plane) *
+				drm_format_info_block_height(info, plane));
+}
+EXPORT_SYMBOL(drm_format_info_min_pitch);
Index: sys/dev/drm/core/drm_framebuffer.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_framebuffer.c
@@ -0,0 +1,1094 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
+#include <drm/drm_util.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Frame buffers are abstract memory objects that provide a source of pixels to
+ * scanout to a CRTC. Applications explicitly request the creation of frame
+ * buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and receive an opaque
+ * handle that can be passed to the KMS CRTC control, plane configuration and
+ * page flip functions.
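+ *
+ * From userspace the creation flow is roughly as follows (a sketch; error
+ * handling omitted, drmIoctl() is the libdrm wrapper):
+ *
+ *	struct drm_mode_fb_cmd2 cmd = {
+ *		.width = 1920, .height = 1080,
+ *		.pixel_format = DRM_FORMAT_XRGB8888,
+ *		.handles = { gem_handle },
+ *		.pitches = { 1920 * 4 },
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd);
+ *
+ * On success cmd.fb_id names the new framebuffer.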
+ *
+ * Frame buffers rely on the underlying memory manager for allocating backing
+ * storage. When creating a frame buffer applications pass a memory handle
+ * (or a list of memory handles for multi-planar formats) through the
+ * &struct drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace
+ * buffer management interface this would be a GEM handle. Drivers are however
+ * free to use their own backing storage object handles, e.g. vmwgfx directly
+ * exposes special TTM handles to userspace and so expects TTM handles in the
+ * create ioctl and not GEM handles.
+ *
+ * Framebuffers are tracked with &struct drm_framebuffer. They are published
+ * using drm_framebuffer_init() - after calling that function userspace can use
+ * and access the framebuffer object. The helper function
+ * drm_helper_mode_fill_fb_struct() can be used to pre-fill the required
+ * metadata fields.
+ *
+ * The lifetime of a drm framebuffer is controlled with a reference count,
+ * drivers can grab additional references with drm_framebuffer_get() and drop
+ * them again with drm_framebuffer_put(). For driver-private framebuffers for
+ * which the last reference is never dropped (e.g. for the fbdev framebuffer
+ * when &struct drm_framebuffer is embedded into the fbdev helper
+ * struct) drivers can manually clean up a framebuffer at module unload time
+ * with drm_framebuffer_unregister_private(). But doing this is not
+ * recommended, and it's better to have a normal free-standing &struct
+ * drm_framebuffer.
+ */
+
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+				     uint32_t src_w, uint32_t src_h,
+				     const struct drm_framebuffer *fb)
+{
+	unsigned int fb_width, fb_height;
+
+	fb_width = fb->width << 16;
+	fb_height = fb->height << 16;
+
+	/* Make sure source coordinates are inside the fb. */
+	if (src_w > fb_width ||
+	    src_x > fb_width - src_w ||
+	    src_h > fb_height ||
+	    src_y > fb_height - src_h) {
+		DRM_DEBUG_KMS("Invalid source coordinates "
+			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+			      src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+			      src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+			      src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+			      src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
+			      fb->width, fb->height);
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @or: pointer to request structure
+ * @file_priv: drm file
+ *
+ * Add a new FB to the specified CRTC, given a user request. This is the
+ * original addfb ioctl which only supported RGB formats.
+ *
+ * Called by the user via ioctl, or by an in-kernel client.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
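+ *
+ * For example, a legacy request with bpp == 32 and depth == 24 is
+ * translated below into an ADDFB2-style request for DRM_FORMAT_XRGB8888.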
+ */ +int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or, + struct drm_file *file_priv) +{ + struct drm_mode_fb_cmd2 r = {}; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + r.pixel_format = drm_driver_legacy_fb_format(dev, or->bpp, or->depth); + if (r.pixel_format == DRM_FORMAT_INVALID) { + DRM_DEBUG("bad {bpp:%d, depth:%d}\n", or->bpp, or->depth); + return -EINVAL; + } + + /* convert to new format and call new ioctl */ + r.fb_id = or->fb_id; + r.width = or->width; + r.height = or->height; + r.pitches[0] = or->pitch; + r.handles[0] = or->handle; + + ret = drm_mode_addfb2(dev, &r, file_priv); + if (ret) + return ret; + + or->fb_id = r.fb_id; + + return 0; +} + +int drm_mode_addfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + return drm_mode_addfb(dev, data, file_priv); +} + +static int fb_plane_width(int width, + const struct drm_format_info *format, int plane) +{ + if (plane == 0) + return width; + + return DIV_ROUND_UP(width, format->hsub); +} + +static int fb_plane_height(int height, + const struct drm_format_info *format, int plane) +{ + if (plane == 0) + return height; + + return DIV_ROUND_UP(height, format->vsub); +} + +static int framebuffer_check(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *r) +{ + const struct drm_format_info *info; + int i; + + /* check if the format is supported at all */ + info = __drm_format_info(r->pixel_format); + if (!info) { + struct drm_format_name_buf format_name; + + DRM_DEBUG_KMS("bad framebuffer format %s\n", + drm_get_format_name(r->pixel_format, + &format_name)); + return -EINVAL; + } + + /* now let the driver pick its own format info */ + info = drm_get_format_info(dev, r); + + if (r->width == 0) { + DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width); + return -EINVAL; + } + + if (r->height == 0) { + DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height); + return -EINVAL; + } + + for (i = 0; i < info->num_planes; i++) { + unsigned int width = fb_plane_width(r->width, info, i); + unsigned int height = fb_plane_height(r->height, info, i); + unsigned int block_size = info->char_per_block[i]; + u64 min_pitch = drm_format_info_min_pitch(info, i, width); + + if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) { + DRM_DEBUG_KMS("Format requires non-linear modifier for plane %d\n", i); + return -EINVAL; + } + + if (!r->handles[i]) { + DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); + return -EINVAL; + } + + if (min_pitch > UINT_MAX) + return -ERANGE; + + if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX) + return -ERANGE; + + if (block_size && r->pitches[i] < min_pitch) { + DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); + return -EINVAL; + } + + if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) { + DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n", + r->modifier[i], i); + return -EINVAL; + } + + if (r->flags & DRM_MODE_FB_MODIFIERS && + r->modifier[i] != r->modifier[0]) { + DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n", + r->modifier[i], i); + return -EINVAL; + } + + /* modifier specific checks: */ + switch (r->modifier[i]) { + case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE: + /* NOTE: the pitch restriction may be lifted later if it turns + * out that no hw has this restriction: + */ + if (r->pixel_format != DRM_FORMAT_NV12 || + width % 128 || height % 32 || + r->pitches[i] % 128) { + DRM_DEBUG_KMS("bad modifier data for plane %d\n", i); + return -EINVAL; + } + break; + + default: + 
break; + } + } + + for (i = info->num_planes; i < 4; i++) { + if (r->modifier[i]) { + DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i); + return -EINVAL; + } + + /* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */ + if (!(r->flags & DRM_MODE_FB_MODIFIERS)) + continue; + + if (r->handles[i]) { + DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i); + return -EINVAL; + } + + if (r->pitches[i]) { + DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i); + return -EINVAL; + } + + if (r->offsets[i]) { + DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i); + return -EINVAL; + } + } + + return 0; +} + +struct drm_framebuffer * +drm_internal_framebuffer_create(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_framebuffer *fb; + int ret; + + if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) { + DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); + return ERR_PTR(-EINVAL); + } + + if ((config->min_width > r->width) || (r->width > config->max_width)) { + DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", + r->width, config->min_width, config->max_width); + return ERR_PTR(-EINVAL); + } + if ((config->min_height > r->height) || (r->height > config->max_height)) { + DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", + r->height, config->min_height, config->max_height); + return ERR_PTR(-EINVAL); + } + + if (r->flags & DRM_MODE_FB_MODIFIERS && + !dev->mode_config.allow_fb_modifiers) { + DRM_DEBUG_KMS("driver does not support fb modifiers\n"); + return ERR_PTR(-EINVAL); + } + + ret = framebuffer_check(dev, r); + if (ret) + return ERR_PTR(ret); + + fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); + if (IS_ERR(fb)) { + DRM_DEBUG_KMS("could not create framebuffer\n"); + return fb; + } + + return fb; +} +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_internal_framebuffer_create); + +/** + * drm_mode_addfb2 - add an FB to the graphics configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Add a new FB to the specified CRTC, given a user request with format. This is + * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers + * and uses fourcc codes as pixel format specifiers. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_fb_cmd2 *r = data; + struct drm_framebuffer *fb; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + fb = drm_internal_framebuffer_create(dev, r, file_priv); + if (IS_ERR(fb)) + return PTR_ERR(fb); + + DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); + r->fb_id = fb->base.id; + + /* Transfer ownership to the filp for reaping on close */ + mutex_lock(&file_priv->fbs_lock); + list_add(&fb->filp_head, &file_priv->fbs); + mutex_unlock(&file_priv->fbs_lock); + + return 0; +} + +int drm_mode_addfb2_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ +#ifdef __BIG_ENDIAN + if (!dev->mode_config.quirk_addfb_prefer_host_byte_order) { + /* + * Drivers must set the + * quirk_addfb_prefer_host_byte_order quirk to make + * the drm_mode_addfb() compat code work correctly on + * bigendian machines. 
+ * + * If they don't they interpret pixel_format values + * incorrectly for bug compatibility, which in turn + * implies the ADDFB2 ioctl does not work correctly + * then. So block it to make userspace fallback to + * ADDFB. + */ + DRM_DEBUG_KMS("addfb2 broken on bigendian"); + return -EOPNOTSUPP; + } +#endif + return drm_mode_addfb2(dev, data, file_priv); +} + +struct drm_mode_rmfb_work { + struct work_struct work; + struct list_head fbs; +}; + +static void drm_mode_rmfb_work_fn(struct work_struct *w) +{ + struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work); + + while (!list_empty(&arg->fbs)) { + struct drm_framebuffer *fb = + list_first_entry(&arg->fbs, typeof(*fb), filp_head); + + list_del_init(&fb->filp_head); + drm_framebuffer_remove(fb); + } +} + +/** + * drm_mode_rmfb - remove an FB from the configuration + * @dev: drm device + * @fb_id: id of framebuffer to remove + * @file_priv: drm file + * + * Remove the specified FB. + * + * Called by the user via ioctl, or by an in-kernel client. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, + struct drm_file *file_priv) +{ + struct drm_framebuffer *fb = NULL; + struct drm_framebuffer *fbl = NULL; + int found = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + fb = drm_framebuffer_lookup(dev, file_priv, fb_id); + if (!fb) + return -ENOENT; + + mutex_lock(&file_priv->fbs_lock); + list_for_each_entry(fbl, &file_priv->fbs, filp_head) + if (fb == fbl) + found = 1; + if (!found) { + mutex_unlock(&file_priv->fbs_lock); + goto fail_unref; + } + + list_del_init(&fb->filp_head); + mutex_unlock(&file_priv->fbs_lock); + + /* drop the reference we picked up in framebuffer lookup */ + drm_framebuffer_put(fb); + + /* + * we now own the reference that was stored in the fbs list + * + * drm_framebuffer_remove may fail with -EINTR on pending signals, + * so run this in a separate stack as there's no way to correctly + * handle this after the fb is already removed from the lookup table. + */ + if (drm_framebuffer_read_refcount(fb) > 1) { + struct drm_mode_rmfb_work arg; + + INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); + INIT_LIST_HEAD(&arg.fbs); + list_add_tail(&fb->filp_head, &arg.fbs); + + schedule_work(&arg.work); + flush_work(&arg.work); + destroy_work_on_stack(&arg.work); + } else + drm_framebuffer_put(fb); + + return 0; + +fail_unref: + drm_framebuffer_put(fb); + return -ENOENT; +} + +int drm_mode_rmfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + uint32_t *fb_id = data; + + return drm_mode_rmfb(dev, *fb_id, file_priv); +} + +/** + * drm_mode_getfb - get FB info + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Lookup the FB given its ID and return info about it. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_getfb(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_fb_cmd *r = data; + struct drm_framebuffer *fb; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id); + if (!fb) + return -ENOENT; + + /* Multi-planar framebuffers need getfb2. 
*/
+	if (fb->format->num_planes > 1) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!fb->funcs->create_handle) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	r->height = fb->height;
+	r->width = fb->width;
+	r->depth = fb->format->depth;
+	r->bpp = fb->format->cpp[0] * 8;
+	r->pitch = fb->pitches[0];
+
+	/* GET_FB() is an unprivileged ioctl so we must not return a
+	 * buffer-handle to non-master processes! For
+	 * backwards-compatibility reasons, we cannot make GET_FB() privileged,
+	 * so just return an invalid handle for non-masters.
+	 */
+	if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
+		r->handle = 0;
+		ret = 0;
+		goto out;
+	}
+
+	ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
+
+out:
+	drm_framebuffer_put(fb);
+
+	return ret;
+}
+
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * usb display-link, mipi manual update panels or edp panel self refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding &drm_framebuffer_funcs.dirty callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_clip_rect __user *clips_ptr;
+	struct drm_clip_rect *clips = NULL;
+	struct drm_mode_fb_dirty_cmd *r = data;
+	struct drm_framebuffer *fb;
+	unsigned flags;
+	int num_clips;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EOPNOTSUPP;
+
+	fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
+	if (!fb)
+		return -ENOENT;
+
+	num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+	if (!num_clips != !clips_ptr) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+	/* If userspace annotates copy, clips must come in pairs */
+	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
+		clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+
+		ret = copy_from_user(clips, clips_ptr,
+				     num_clips * sizeof(*clips));
+		if (ret) {
+			ret = -EFAULT;
+			goto out_err2;
+		}
+	}
+
+	if (fb->funcs->dirty) {
+		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+				       clips, num_clips);
+	} else {
+		ret = -ENOSYS;
+	}
+
+out_err2:
+	kfree(clips);
+out_err1:
+	drm_framebuffer_put(fb);
+
+	return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: drm file
+ *
+ * Destroy all the FBs associated with @priv. Called when the file is
+ * closed; this is not an ioctl and returns nothing.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+	struct drm_framebuffer *fb, *tfb;
+	struct drm_mode_rmfb_work arg;
+
+	INIT_LIST_HEAD(&arg.fbs);
+
+	/*
+	 * When the file gets released that means no one else can access the fb
+	 * list any more, so no need to grab fpriv->fbs_lock. And we need to
+	 * avoid upsetting lockdep since the universal cursor code adds a
+	 * framebuffer while holding mutex locks.
+	 *
+	 * Note that a real deadlock between fpriv->fbs_lock and the modeset
+	 * locks is impossible here since no one else but this function can get
+	 * at it any more.
+	 */
+	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+		if (drm_framebuffer_read_refcount(fb) > 1) {
+			list_move_tail(&fb->filp_head, &arg.fbs);
+		} else {
+			list_del_init(&fb->filp_head);
+
+			/* This drops the fpriv->fbs reference. */
+			drm_framebuffer_put(fb);
+		}
+	}
+
+	if (!list_empty(&arg.fbs)) {
+		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+
+		schedule_work(&arg.work);
+		flush_work(&arg.work);
+		destroy_work_on_stack(&arg.work);
+	}
+}
+
+void drm_framebuffer_free(struct kref *kref)
+{
+	struct drm_framebuffer *fb =
+		container_of(kref, struct drm_framebuffer, base.refcount);
+	struct drm_device *dev = fb->dev;
+
+	/*
+	 * The lookup idr holds a weak reference, which has not necessarily been
+	 * removed at this point. Check for that.
+	 */
+	drm_mode_object_unregister(dev, &fb->base);
+
+	fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users. Which means by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+			 const struct drm_framebuffer_funcs *funcs)
+{
+	int ret;
+
+	if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
+		return -EINVAL;
+
+	INIT_LIST_HEAD(&fb->filp_head);
+
+	fb->funcs = funcs;
+	strcpy(fb->comm, current->comm);
+
+	ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
+				    false, drm_framebuffer_free);
+	if (ret)
+		goto out;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	dev->mode_config.num_fb++;
+	list_add(&fb->head, &dev->mode_config.fb_list);
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	drm_mode_object_register(dev, &fb->base);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @file_priv: drm file to check for lease against.
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again, using drm_framebuffer_put().
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+					       struct drm_file *file_priv,
+					       uint32_t id)
+{
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb = NULL;
+
+	obj = __drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_FB);
+	if (obj)
+		fb = obj_to_fb(obj);
+	return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ *
+ * NOTE: This function is deprecated. For driver-private framebuffers it is not
+ * recommended to embed a framebuffer struct into the fbdev struct; instead, a
+ * framebuffer pointer is preferred and drm_framebuffer_put() should be called
+ * when the framebuffer is to be cleaned up.
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev;
+
+	if (!fb)
+		return;
+
+	dev = fb->dev;
+
+	/* Mark fb as reaped and drop idr ref. */
+	drm_mode_object_unregister(dev, &fb->base);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Cleanup framebuffer. This function is intended to be used from the driver's
+ * &drm_framebuffer_funcs.destroy callback. It can also be used to clean up
+ * driver private framebuffers embedded into a larger structure.
+ *
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private().
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_del(&fb->head);
+	dev->mode_config.num_fb--;
+	mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+static int atomic_remove_fb(struct drm_framebuffer *fb)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_device *dev = fb->dev;
+	struct drm_atomic_state *state;
+	struct drm_plane *plane;
+	struct drm_connector *conn __maybe_unused;
+	struct drm_connector_state *conn_state;
+	int i, ret;
+	unsigned plane_mask;
+	bool disable_crtcs = false;
+
+retry_disable:
+	drm_modeset_acquire_init(&ctx, 0);
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	state->acquire_ctx = &ctx;
+
+retry:
+	plane_mask = 0;
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (ret)
+		goto unlock;
+
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		if (plane->state->fb != fb)
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto unlock;
+		}
+
+		if (disable_crtcs && plane_state->crtc->primary == plane) {
+			struct drm_crtc_state *crtc_state;
+
+			crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+
+			ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
+			if (ret)
+				goto unlock;
+
+			crtc_state->active = false;
+			ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+			if (ret)
+				goto unlock;
+		}
+
+		drm_atomic_set_fb_for_plane(plane_state, NULL);
+		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+		if (ret)
+			goto unlock;
+
+		plane_mask |= drm_plane_mask(plane);
+	}
+
+	/* This list is only filled when disable_crtcs is set. */
+	for_each_new_connector_in_state(state, conn, conn_state, i) {
+		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+
+		if (ret)
+			goto unlock;
+	}
+
+	if (plane_mask)
+		ret = drm_atomic_commit(state);
+
+unlock:
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	drm_atomic_state_put(state);
+
+out:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	if (ret == -EINVAL && !disable_crtcs) {
+		disable_crtcs = true;
+		goto retry_disable;
+	}
+
+	return ret;
+}
+
+static void legacy_remove_fb(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+
+	drm_modeset_lock_all(dev);
+	/* remove from any CRTC */
+	drm_for_each_crtc(crtc, dev) {
+		if (crtc->primary->fb == fb) {
+			/* should turn off the crtc */
+			if (drm_crtc_force_disable(crtc))
+				DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+		}
+	}
+
+	drm_for_each_plane(plane, dev) {
+		if (plane->fb == fb)
+			drm_plane_force_disable(plane);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config. If they're
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev;
+
+	if (!fb)
+		return;
+
+	dev = fb->dev;
+
+	WARN_ON(!list_empty(&fb->filp_head));
+
+	/*
+	 * drm ABI mandates that we remove any deleted framebuffers from active
+	 * usage. But since most sane clients only remove framebuffers they no
+	 * longer need, try to optimize this away.
+	 *
+	 * Since we're holding a reference ourselves, observing a refcount of 1
+	 * means that we're the last holder and can skip it. Also, the refcount
+	 * can never increase from 1 again, so we don't need any barriers or
+	 * locks.
+	 *
+	 * Note that userspace could try to race with us and instate a new
+	 * usage _after_ we've cleared all current ones. End result will be an
+	 * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+	 * in this manner.
+	 */
+	if (drm_framebuffer_read_refcount(fb) > 1) {
+		if (drm_drv_uses_atomic_modeset(dev)) {
+			int ret = atomic_remove_fb(fb);
+			WARN(ret, "atomic remove_fb failed with %i\n", ret);
+		} else
+			legacy_remove_fb(fb);
+	}
+
+	drm_framebuffer_put(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_remove);
+
+/**
+ * drm_framebuffer_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @fb: the framebuffer
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_framebuffer_plane_width(int width,
+				const struct drm_framebuffer *fb, int plane)
+{
+	if (plane >= fb->format->num_planes)
+		return 0;
+
+	return fb_plane_width(width, fb->format, plane);
+}
+EXPORT_SYMBOL(drm_framebuffer_plane_width);
+
+/**
+ * drm_framebuffer_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @fb: the framebuffer
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
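+ *
+ * For a chroma-subsampled format such as DRM_FORMAT_NV12 (vsub == 2),
+ * plane 1 is half the height of plane 0, rounded up.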
+ */ +int drm_framebuffer_plane_height(int height, + const struct drm_framebuffer *fb, int plane) +{ + if (plane >= fb->format->num_planes) + return 0; + + return fb_plane_height(height, fb->format, plane); +} +EXPORT_SYMBOL(drm_framebuffer_plane_height); + +void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_framebuffer *fb) +{ + struct drm_format_name_buf format_name; + unsigned int i; + + drm_printf_indent(p, indent, "allocated by = %s\n", fb->comm); + drm_printf_indent(p, indent, "refcount=%u\n", + drm_framebuffer_read_refcount(fb)); + drm_printf_indent(p, indent, "format=%s\n", + drm_get_format_name(fb->format->format, &format_name)); + drm_printf_indent(p, indent, "modifier=0x%llx\n", fb->modifier); + drm_printf_indent(p, indent, "size=%ux%u\n", fb->width, fb->height); + drm_printf_indent(p, indent, "layers:\n"); + + for (i = 0; i < fb->format->num_planes; i++) { + drm_printf_indent(p, indent + 1, "size[%u]=%dx%d\n", i, + drm_framebuffer_plane_width(fb->width, fb, i), + drm_framebuffer_plane_height(fb->height, fb, i)); + drm_printf_indent(p, indent + 1, "pitch[%u]=%u\n", i, fb->pitches[i]); + drm_printf_indent(p, indent + 1, "offset[%u]=%u\n", i, fb->offsets[i]); + drm_printf_indent(p, indent + 1, "obj[%u]:%s\n", i, + fb->obj[i] ? "" : "(null)"); + if (fb->obj[i]) + drm_gem_print_info(p, indent + 2, fb->obj[i]); + } +} + +#ifdef CONFIG_DEBUG_FS +static int drm_framebuffer_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_framebuffer *fb; + + mutex_lock(&dev->mode_config.fb_lock); + drm_for_each_fb(fb, dev) { + drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); + drm_framebuffer_print_info(&p, 1, fb); + } + mutex_unlock(&dev->mode_config.fb_lock); + + return 0; +} + +static const struct drm_info_list drm_framebuffer_debugfs_list[] = { + { "framebuffer", drm_framebuffer_info, 0 }, +}; + +int drm_framebuffer_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(drm_framebuffer_debugfs_list, + ARRAY_SIZE(drm_framebuffer_debugfs_list), + minor->debugfs_root, minor); +} +#endif Index: sys/dev/drm/core/drm_gem.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_gem.c @@ -0,0 +1,1347 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+#include <linux/mem_encrypt.h>
+#include <linux/pagevec.h>
+
+#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vma_manager.h>
+
+#include "drm_internal.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file. However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls. The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/**
+ * drm_gem_init - Initialize the GEM device fields
+ * @dev: drm_device structure to initialize
+ */
+int
+drm_gem_init(struct drm_device *dev)
+{
+	struct drm_vma_offset_manager *vma_offset_manager;
+
+	mutex_init(&dev->object_name_lock);
+	idr_init_base(&dev->object_name_idr, 1);
+
+	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+	if (!vma_offset_manager) {
+		DRM_ERROR("out of memory\n");
+		return -ENOMEM;
+	}
+
+	dev->vma_offset_manager = vma_offset_manager;
+	drm_vma_offset_manager_init(vma_offset_manager,
+				    DRM_FILE_PAGE_OFFSET_START,
+				    DRM_FILE_PAGE_OFFSET_SIZE);
+
+	return 0;
+}
+
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+
+	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+	kfree(dev->vma_offset_manager);
+	dev->vma_offset_manager = NULL;
+}
+
+/**
+ * drm_gem_object_init - initialize an allocated shmem-backed GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
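+ *
+ * A driver buffer-object constructor might use it roughly like this
+ * (a sketch; struct my_bo is hypothetical and embeds a drm_gem_object):
+ *
+ *	struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ *	int ret = drm_gem_object_init(dev, &bo->base, round_up(size, PAGE_SIZE));
+ *
+ * The size must be page-aligned; drm_gem_private_object_init() below
+ * enforces this.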
+ */ +int drm_gem_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size) +{ + struct file *filp; + + drm_gem_private_object_init(dev, obj, size); + + filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); + if (IS_ERR(filp)) + return PTR_ERR(filp); + + obj->filp = filp; + + return 0; +} +EXPORT_SYMBOL(drm_gem_object_init); + +/** + * drm_gem_private_object_init - initialize an allocated private GEM object + * @dev: drm_device the object should be initialized for + * @obj: drm_gem_object to initialize + * @size: object size + * + * Initialize an already allocated GEM object of the specified size with + * no GEM provided backing store. Instead the caller is responsible for + * backing the object and handling it. + */ +void drm_gem_private_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size) +{ + BUG_ON((size & (PAGE_SIZE - 1)) != 0); + + obj->dev = dev; + obj->filp = NULL; + + kref_init(&obj->refcount); + obj->handle_count = 0; + obj->size = size; + reservation_object_init(&obj->_resv); + if (!obj->resv) + obj->resv = &obj->_resv; + + drm_vma_node_reset(&obj->vma_node); +} +EXPORT_SYMBOL(drm_gem_private_object_init); + +static void +drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) +{ + /* + * Note: obj->dma_buf can't disappear as long as we still hold a + * handle reference in obj->handle_count. + */ + mutex_lock(&filp->prime.lock); + if (obj->dma_buf) { + drm_prime_remove_buf_handle_locked(&filp->prime, + obj->dma_buf); + } + mutex_unlock(&filp->prime.lock); +} + +/** + * drm_gem_object_handle_free - release resources bound to userspace handles + * @obj: GEM object to clean up. + * + * Called after the last handle to the object has been closed + * + * Removes any name for the object. Note that this must be + * called before drm_gem_object_free or we'll be touching + * freed memory + */ +static void drm_gem_object_handle_free(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + /* Remove any name for this object */ + if (obj->name) { + idr_remove(&dev->object_name_idr, obj->name); + obj->name = 0; + } +} + +static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) +{ + /* Unbreak the reference cycle if we have an exported dma_buf. */ + if (obj->dma_buf) { + dma_buf_put(obj->dma_buf); + obj->dma_buf = NULL; + } +} + +static void +drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + bool final = false; + + if (WARN_ON(obj->handle_count == 0)) + return; + + /* + * Must bump handle count first as this may be the last + * ref, in which case the object would disappear before we + * checked for a name + */ + + mutex_lock(&dev->object_name_lock); + if (--obj->handle_count == 0) { + drm_gem_object_handle_free(obj); + drm_gem_object_exported_dma_buf_free(obj); + final = true; + } + mutex_unlock(&dev->object_name_lock); + + if (final) + drm_gem_object_put_unlocked(obj); +} + +/* + * Called at device or object close to release the file's + * handle references on objects. 
+ */ +static int +drm_gem_object_release_handle(int id, void *ptr, void *data) +{ + struct drm_file *file_priv = data; + struct drm_gem_object *obj = ptr; + struct drm_device *dev = obj->dev; + + if (obj->funcs && obj->funcs->close) + obj->funcs->close(obj, file_priv); + else if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); + + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, file_priv); + drm_vma_node_revoke(&obj->vma_node, file_priv); + + drm_gem_object_handle_put_unlocked(obj); + + return 0; +} + +/** + * drm_gem_handle_delete - deletes the given file-private handle + * @filp: drm file-private structure to use for the handle look up + * @handle: userspace handle to delete + * + * Removes the GEM handle from the @filp lookup table which has been added with + * drm_gem_handle_create(). If this is the last handle also cleans up linked + * resources like GEM names. + */ +int +drm_gem_handle_delete(struct drm_file *filp, u32 handle) +{ + struct drm_gem_object *obj; + + spin_lock(&filp->table_lock); + + /* Check if we currently have a reference on the object */ + obj = idr_replace(&filp->object_idr, NULL, handle); + spin_unlock(&filp->table_lock); + if (IS_ERR_OR_NULL(obj)) + return -EINVAL; + + /* Release driver's reference and decrement refcount. */ + drm_gem_object_release_handle(handle, obj, filp); + + /* And finally make the handle available for future allocations. */ + spin_lock(&filp->table_lock); + idr_remove(&filp->object_idr, handle); + spin_unlock(&filp->table_lock); + + return 0; +} +EXPORT_SYMBOL(drm_gem_handle_delete); + +/** + * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object + * @file: drm file-private structure containing the gem object + * @dev: corresponding drm_device + * @handle: gem object handle + * @offset: return location for the fake mmap offset + * + * This implements the &drm_driver.dumb_map_offset kms driver callback for + * drivers which use gem to manage their backing storage. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + u32 handle, u64 *offset) +{ + struct drm_gem_object *obj; + int ret; + + obj = drm_gem_object_lookup(file, handle); + if (!obj) + return -ENOENT; + + /* Don't allow imported objects to be mapped */ + if (obj->import_attach) { + ret = -EINVAL; + goto out; + } + + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto out; + + *offset = drm_vma_node_offset_addr(&obj->vma_node); +out: + drm_gem_object_put_unlocked(obj); + + return ret; +} +EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset); + +/** + * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers + * @file: drm file-private structure to remove the dumb handle from + * @dev: corresponding drm_device + * @handle: the dumb handle to remove + * + * This implements the &drm_driver.dumb_destroy kms driver callback for drivers + * which use gem to manage their backing storage. 
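+ *
+ * Both dumb-buffer helpers are typically wired up verbatim in
+ * &struct drm_driver, e.g. (my_dumb_create stands in for the driver's
+ * own allocator):
+ *
+ *	.dumb_create = my_dumb_create,
+ *	.dumb_map_offset = drm_gem_dumb_map_offset,
+ *	.dumb_destroy = drm_gem_dumb_destroy,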
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal function to create a handle
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
+ *
+ * This expects the &drm_device.object_name_lock to be held already and will
+ * drop it before returning. Used to avoid races in establishing new handles
+ * when importing an object from either a flink name or a dma-buf.
+ *
+ * Handles must be released again through drm_gem_handle_delete(). This is done
+ * when userspace closes @file_priv for all attached handles, or through the
+ * GEM_CLOSE ioctl for individual handles.
+ */
+int
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+			   struct drm_gem_object *obj,
+			   u32 *handlep)
+{
+	struct drm_device *dev = obj->dev;
+	u32 handle;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+	if (obj->handle_count++ == 0)
+		drm_gem_object_get(obj);
+
+	/*
+	 * Get the user-visible handle using idr. Preload and perform
+	 * allocation under our spinlock.
+	 */
+	idr_preload(GFP_KERNEL);
+	spin_lock(&file_priv->table_lock);
+
+	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+
+	spin_unlock(&file_priv->table_lock);
+	idr_preload_end();
+
+	mutex_unlock(&dev->object_name_lock);
+	if (ret < 0)
+		goto err_unref;
+
+	handle = ret;
+
+	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
+	if (ret)
+		goto err_remove;
+
+	if (obj->funcs && obj->funcs->open) {
+		ret = obj->funcs->open(obj, file_priv);
+		if (ret)
+			goto err_revoke;
+	} else if (dev->driver->gem_open_object) {
+		ret = dev->driver->gem_open_object(obj, file_priv);
+		if (ret)
+			goto err_revoke;
+	}
+
+	*handlep = handle;
+	return 0;
+
+err_revoke:
+	drm_vma_node_revoke(&obj->vma_node, file_priv);
+err_remove:
+	spin_lock(&file_priv->table_lock);
+	idr_remove(&file_priv->object_idr, handle);
+	spin_unlock(&file_priv->table_lock);
+err_unref:
+	drm_gem_object_handle_put_unlocked(obj);
+	return ret;
+}
+
+/**
+ * drm_gem_handle_create - create a gem handle for an object
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
+ *
+ * Create a handle for this object. This adds a handle reference to the object,
+ * which includes a regular reference count. Callers will likely want to
+ * dereference the object afterwards.
+ *
+ * Since this publishes @obj to userspace it must be fully set up by this
+ * point; drivers must call this last in their buffer object creation
+ * callbacks.
+ */
+int drm_gem_handle_create(struct drm_file *file_priv,
+			  struct drm_gem_object *obj,
+			  u32 *handlep)
+{
+	mutex_lock(&obj->dev->object_name_lock);
+
+	return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+
+/**
+ * drm_gem_free_mmap_offset - release a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ *
+ * Note that drm_gem_object_release() already calls this function, so drivers
+ * don't have to take care of releasing the mmap offset themselves when freeing
+ * the GEM object.
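+ *
+ * It is also safe to call this on an object that never had an offset
+ * allocated: drm_vma_offset_remove() ignores nodes that were never added.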
+ */ +void +drm_gem_free_mmap_offset(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node); +} +EXPORT_SYMBOL(drm_gem_free_mmap_offset); + +/** + * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object + * @obj: obj in question + * @size: the virtual size + * + * GEM memory mapping works by handing back to userspace a fake mmap offset + * it can use in a subsequent mmap(2) call. The DRM core code then looks + * up the object based on the offset and sets up the various memory mapping + * structures. + * + * This routine allocates and attaches a fake offset for @obj, in cases where + * the virtual size differs from the physical size (ie. &drm_gem_object.size). + * Otherwise just use drm_gem_create_mmap_offset(). + * + * This function is idempotent and handles an already allocated mmap offset + * transparently. Drivers do not need to check for this case. + */ +int +drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) +{ + struct drm_device *dev = obj->dev; + + return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node, + size / PAGE_SIZE); +} +EXPORT_SYMBOL(drm_gem_create_mmap_offset_size); + +/** + * drm_gem_create_mmap_offset - create a fake mmap offset for an object + * @obj: obj in question + * + * GEM memory mapping works by handing back to userspace a fake mmap offset + * it can use in a subsequent mmap(2) call. The DRM core code then looks + * up the object based on the offset and sets up the various memory mapping + * structures. + * + * This routine allocates and attaches a fake offset for @obj. + * + * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release + * the fake offset again. + */ +int drm_gem_create_mmap_offset(struct drm_gem_object *obj) +{ + return drm_gem_create_mmap_offset_size(obj, obj->size); +} +EXPORT_SYMBOL(drm_gem_create_mmap_offset); + +#ifdef __linux__ +/* + * Move pages to appropriate lru and release the pagevec, decrementing the + * ref count of those pages. + */ +static void drm_gem_check_release_pagevec(struct pagevec *pvec) +{ + check_move_unevictable_pages(pvec); + __pagevec_release(pvec); + cond_resched(); +} + +/** + * drm_gem_get_pages - helper to allocate backing pages for a GEM object + * from shmem + * @obj: obj in question + * + * This reads the page-array of the shmem-backing storage of the given gem + * object. An array of pages is returned. If a page is not allocated or + * swapped-out, this will allocate/swap-in the required pages. Note that the + * whole object is covered by the page-array and pinned in memory. + * + * Use drm_gem_put_pages() to release the array and unpin all pages. + * + * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()). + * If you require other GFP-masks, you have to do those allocations yourself. + * + * Note that you are not allowed to change gfp-zones during runtime. That is, + * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as + * set during initialization. If you have special zone constraints, set them + * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care + * to keep pages in the required zone during swap-in. 
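+ *
+ * Typical usage, with error handling elided for brevity:
+ *
+ *	struct page **pages = drm_gem_get_pages(obj);
+ *
+ *	if (IS_ERR(pages))
+ *		return PTR_ERR(pages);
+ *	... map the pages or build an sg table ...
+ *	drm_gem_put_pages(obj, pages, true, true);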
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
+{
+	struct address_space *mapping;
+	struct page *p, **pages;
+	struct pagevec pvec;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	mapping = obj->filp->f_mapping;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mapping_set_unevictable(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+
+		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
+		 * correct region during swapin. Note that this requires
+		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
+		 * so shmem can relocate pages during swapin if required.
+		 */
+		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
+		       (page_to_pfn(p) >= 0x00100000UL));
+	}
+
+	return pages;
+
+fail:
+	mapping_clear_unevictable(mapping);
+	pagevec_init(&pvec);
+	while (i--) {
+		if (!pagevec_add(&pvec, pages[i]))
+			drm_gem_check_release_pagevec(&pvec);
+	}
+	if (pagevec_count(&pvec))
+		drm_gem_check_release_pagevec(&pvec);
+
+	kvfree(pages);
+	return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		       bool dirty, bool accessed)
+{
+	int i, npages;
+	struct address_space *mapping;
+	struct pagevec pvec;
+
+	mapping = file_inode(obj->filp)->i_mapping;
+	mapping_clear_unevictable(mapping);
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pagevec_init(&pvec);
+	for (i = 0; i < npages; i++) {
+		if (!pages[i])
+			continue;
+
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		if (!pagevec_add(&pvec, pages[i]))
+			drm_gem_check_release_pagevec(&pvec);
+	}
+	if (pagevec_count(&pvec))
+		drm_gem_check_release_pagevec(&pvec);
+
+	kvfree(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
+#endif
+
+static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
+			  struct drm_gem_object **objs)
+{
+	int i, ret = 0;
+	struct drm_gem_object *obj;
+
+	spin_lock(&filp->table_lock);
+
+	for (i = 0; i < count; i++) {
+		/* Check if we currently have a reference on the object */
+		obj = idr_find(&filp->object_idr, handle[i]);
+		if (!obj) {
+			ret = -ENOENT;
+			break;
+		}
+		drm_gem_object_get(obj);
+		objs[i] = obj;
+	}
+	spin_unlock(&filp->table_lock);
+
+	return ret;
+}
+
+/**
+ * drm_gem_objects_lookup - look up GEM objects from an array of handles
+ * @filp: DRM file private data
+ * @bo_handles: user pointer to array of userspace handles
+ * @count: size of handle array
+ * @objs_out: returned pointer to array of drm_gem_object pointers
+ *
+ * Takes an array of userspace handles and returns a newly allocated array of
+ * GEM objects.
+ *
+ * For a single handle lookup, use drm_gem_object_lookup().
+ *
+ * Returns:
+ *
+ * @objs filled in with GEM object pointers. Returned GEM objects need to be
+ * released with drm_gem_object_put(). -ENOENT is returned on a lookup
+ * failure. 0 is returned on success.
+ *
+ */
+int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+			   int count, struct drm_gem_object ***objs_out)
+{
+	int ret;
+	u32 *handles;
+	struct drm_gem_object **objs;
+
+	if (!count)
+		return 0;
+
+	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
+			      GFP_KERNEL | __GFP_ZERO);
+	if (!objs)
+		return -ENOMEM;
+
+	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
+	if (!handles) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
+		ret = -EFAULT;
+		DRM_DEBUG("Failed to copy in GEM handles\n");
+		goto out;
+	}
+
+	ret = objects_lookup(filp, handles, count, objs);
+	*objs_out = objs;
+
+out:
+	kvfree(handles);
+	return ret;
+
+}
+EXPORT_SYMBOL(drm_gem_objects_lookup);
+
+/**
+ * drm_gem_object_lookup - look up a GEM object from its handle
+ * @filp: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A reference to the object named by the handle if such exists on @filp, NULL
+ * otherwise.
+ *
+ * If looking up an array of handles, use drm_gem_objects_lookup().
+ */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *filp, u32 handle)
+{
+	struct drm_gem_object *obj = NULL;
+
+	objects_lookup(filp, &handle, 1, &obj);
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * drm_gem_reservation_object_wait - Wait on the shared and/or exclusive
+ * fences of a GEM object's reservation object.
+ * @filep: DRM file private data
+ * @handle: userspace handle
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Returns:
+ *
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than 0 on success.
+ */
+long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+				     bool wait_all, unsigned long timeout)
+{
+	long ret;
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(filep, handle);
+	if (!obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+		return -EINVAL;
+	}
+
+	ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+						  true, timeout);
+	if (ret == 0)
+		ret = -ETIME;
+	else if (ret > 0)
+		ret = 0;
+
+	drm_gem_object_put_unlocked(obj);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_reservation_object_wait);
+
+/**
+ * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_close *args = data;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_GEM))
+		return -EOPNOTSUPP;
+
+	ret = drm_gem_handle_delete(file_priv, args->handle);
+
+	return ret;
+}
+
+/**
+ * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_flink *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_GEM))
+		return -EOPNOTSUPP;
+
+	obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (obj == NULL)
+		return -ENOENT;
+
+	mutex_lock(&dev->object_name_lock);
+	/* prevent races with concurrent gem_close. */
+	if (obj->handle_count == 0) {
+		ret = -ENOENT;
+		goto err;
+	}
+
+	if (!obj->name) {
+		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
+		if (ret < 0)
+			goto err;
+
+		obj->name = ret;
+	}
+
+	args->name = (uint64_t) obj->name;
+	ret = 0;
+
+err:
+	mutex_unlock(&dev->object_name_lock);
+	drm_gem_object_put_unlocked(obj);
+	return ret;
+}
+
+/**
+ * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_gem_open *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+	u32 handle;
+
+	if (!drm_core_check_feature(dev, DRIVER_GEM))
+		return -EOPNOTSUPP;
+
+	mutex_lock(&dev->object_name_lock);
+	obj = idr_find(&dev->object_name_idr, (int) args->name);
+	if (obj) {
+		drm_gem_object_get(obj);
+	} else {
+		mutex_unlock(&dev->object_name_lock);
+		return -ENOENT;
+	}
+
+	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
+	drm_gem_object_put_unlocked(obj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	args->size = obj->size;
+
+	return 0;
+}
+
+/**
+ * drm_gem_open - initializes GEM file-private structures at devnode open time
+ * @dev: drm_device which is being opened by userspace
+ * @file_private: drm file-private structure to set up
+ *
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+	idr_init_base(&file_private->object_idr, 1);
+	spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * drm_gem_release - release file-private GEM resources
+ * @dev: drm_device which is being closed by userspace
+ * @file_private: drm file-private structure to clean up
+ *
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+	idr_for_each(&file_private->object_idr,
+		     &drm_gem_object_release_handle, file_private);
+	idr_destroy(&file_private->object_idr);
+}
+
+/**
+ * drm_gem_object_release - release GEM buffer object resources
+ * @obj: GEM buffer object
+ *
+ * This releases any structures and resources used by @obj and is the inverse
+ * of drm_gem_object_init().
+ */
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+	WARN_ON(obj->dma_buf);
+
+	if (obj->filp)
+		fput(obj->filp);
+
+	reservation_object_fini(&obj->_resv);
+	drm_gem_free_mmap_offset(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_release);
+
+/**
+ * drm_gem_object_free - free a GEM object
+ * @kref: kref of the object to free
+ *
+ * Called after the last reference to the object has been lost.
+ * Must be called holding &drm_device.struct_mutex.
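+ * That last requirement only applies when the legacy gem_free_object
+ * hook is used; &drm_gem_object_funcs.free and gem_free_object_unlocked
+ * run without it.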
+ * + * Frees the object + */ +void +drm_gem_object_free(struct kref *kref) +{ + struct drm_gem_object *obj = + container_of(kref, struct drm_gem_object, refcount); + struct drm_device *dev = obj->dev; + + if (obj->funcs) { + obj->funcs->free(obj); + } else if (dev->driver->gem_free_object_unlocked) { + dev->driver->gem_free_object_unlocked(obj); + } else if (dev->driver->gem_free_object) { + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + dev->driver->gem_free_object(obj); + } +} +EXPORT_SYMBOL(drm_gem_object_free); + +/** + * drm_gem_object_put_unlocked - drop a GEM buffer object reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must not hold the + * &drm_device.struct_mutex lock when calling this function. + * + * See also __drm_gem_object_put(). + */ +void +drm_gem_object_put_unlocked(struct drm_gem_object *obj) +{ + struct drm_device *dev; + + if (!obj) + return; + + dev = obj->dev; + + if (dev->driver->gem_free_object) { + might_lock(&dev->struct_mutex); + if (kref_put_mutex(&obj->refcount, drm_gem_object_free, + &dev->struct_mutex)) + mutex_unlock(&dev->struct_mutex); + } else { + kref_put(&obj->refcount, drm_gem_object_free); + } +} +EXPORT_SYMBOL(drm_gem_object_put_unlocked); + +/** + * drm_gem_object_put - release a GEM buffer object reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must hold the + * &drm_device.struct_mutex lock when calling this function, even when the + * driver doesn't use &drm_device.struct_mutex for anything. + * + * For drivers not encumbered with legacy locking use + * drm_gem_object_put_unlocked() instead. + */ +void +drm_gem_object_put(struct drm_gem_object *obj) +{ + if (obj) { + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + + kref_put(&obj->refcount, drm_gem_object_free); + } +} +EXPORT_SYMBOL(drm_gem_object_put); + +/** + * drm_gem_vm_open - vma->ops->open implementation for GEM + * @vma: VM area structure + * + * This function implements the #vm_operations_struct open() callback for GEM + * drivers. This must be used together with drm_gem_vm_close(). + */ +void drm_gem_vm_open(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + + drm_gem_object_get(obj); +} +EXPORT_SYMBOL(drm_gem_vm_open); + +/** + * drm_gem_vm_close - vma->ops->close implementation for GEM + * @vma: VM area structure + * + * This function implements the #vm_operations_struct close() callback for GEM + * drivers. This must be used together with drm_gem_vm_open(). + */ +void drm_gem_vm_close(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + + drm_gem_object_put_unlocked(obj); +} +EXPORT_SYMBOL(drm_gem_vm_close); + +/** + * drm_gem_mmap_obj - memory map a GEM object + * @obj: the GEM object to map + * @obj_size: the object size to be mapped, in bytes + * @vma: VMA for the area to be mapped + * + * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops + * provided by the driver. Depending on their requirements, drivers can either + * provide a fault handler in their gem_vm_ops (in which case any accesses to + * the object will be trapped, to perform migration, GTT binding, surface + * register allocation, or performance monitoring), or mmap the buffer memory + * synchronously after calling drm_gem_mmap_obj. + * + * This function is mainly intended to implement the DMABUF mmap operation, when + * the GEM object is not looked up based on its fake offset. 
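+ * For instance, a &dma_buf_ops.mmap handler may simply do (sketch only):
+ *
+ *	struct drm_gem_object *obj = dma_buf->priv;
+ *
+ *	return drm_gem_mmap_obj(obj, obj->size, vma);
+ *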
+ * To implement the DRM mmap operation, drivers should use the
+ * drm_gem_mmap() function.
+ *
+ * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
+ * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
+ * callers must verify access restrictions before calling this helper.
+ *
+ * Return 0 on success, or -EINVAL if the object size is smaller than the VMA
+ * size, or if no gem_vm_ops are provided.
+ */
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma)
+{
+	struct drm_device *dev = obj->dev;
+
+	/* Check for valid size. */
+	if (obj_size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (obj->funcs && obj->funcs->vm_ops)
+		vma->vm_ops = obj->funcs->vm_ops;
+	else if (dev->driver->gem_vm_ops)
+		vma->vm_ops = dev->driver->gem_vm_ops;
+	else
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = obj;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_get(obj);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_mmap_obj);
+
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object) and map it with a call to drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_gem_object *obj = NULL;
+	struct drm_vma_offset_node *node;
+	int ret;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (likely(node)) {
+		obj = container_of(node, struct drm_gem_object, vma_node);
+		/*
+		 * When the object is being freed, after it hits 0-refcnt it
+		 * proceeds to tear down the object. In the process it will
+		 * attempt to remove the VMA offset and so acquire this
+		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
+		 * that matches our range, we know it is in the process of being
+		 * destroyed and will be freed as soon as we release the lock -
+		 * so we have to check for the 0-refcnted object and treat it as
+		 * invalid.
+ */ + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + + if (!obj) + return -EINVAL; + + if (!drm_vma_node_is_allowed(node, priv)) { + drm_gem_object_put_unlocked(obj); + return -EACCES; + } + + if (node->readonly) { + if (vma->vm_flags & VM_WRITE) { + drm_gem_object_put_unlocked(obj); + return -EINVAL; + } + + vma->vm_flags &= ~VM_MAYWRITE; + } + + ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, + vma); + + drm_gem_object_put_unlocked(obj); + + return ret; +} +EXPORT_SYMBOL(drm_gem_mmap); + +void drm_gem_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj) +{ + drm_printf_indent(p, indent, "name=%d\n", obj->name); + drm_printf_indent(p, indent, "refcount=%u\n", + kref_read(&obj->refcount)); + drm_printf_indent(p, indent, "start=%08lx\n", + drm_vma_node_start(&obj->vma_node)); + drm_printf_indent(p, indent, "size=%zu\n", obj->size); + drm_printf_indent(p, indent, "imported=%s\n", + obj->import_attach ? "yes" : "no"); + + if (obj->funcs && obj->funcs->print_info) + obj->funcs->print_info(p, indent, obj); + else if (obj->dev->driver->gem_print_info) + obj->dev->driver->gem_print_info(p, indent, obj); +} + +int drm_gem_pin(struct drm_gem_object *obj) +{ + if (obj->funcs && obj->funcs->pin) + return obj->funcs->pin(obj); + else if (obj->dev->driver->gem_prime_pin) + return obj->dev->driver->gem_prime_pin(obj); + else + return 0; +} + +void drm_gem_unpin(struct drm_gem_object *obj) +{ + if (obj->funcs && obj->funcs->unpin) + obj->funcs->unpin(obj); + else if (obj->dev->driver->gem_prime_unpin) + obj->dev->driver->gem_prime_unpin(obj); +} + +void *drm_gem_vmap(struct drm_gem_object *obj) +{ + void *vaddr; + + if (obj->funcs && obj->funcs->vmap) + vaddr = obj->funcs->vmap(obj); + else if (obj->dev->driver->gem_prime_vmap) + vaddr = obj->dev->driver->gem_prime_vmap(obj); + else + vaddr = ERR_PTR(-EOPNOTSUPP); + + if (!vaddr) + vaddr = ERR_PTR(-ENOMEM); + + return vaddr; +} + +void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + if (!vaddr) + return; + + if (obj->funcs && obj->funcs->vunmap) + obj->funcs->vunmap(obj, vaddr); + else if (obj->dev->driver->gem_prime_vunmap) + obj->dev->driver->gem_prime_vunmap(obj, vaddr); +} + +/** + * drm_gem_lock_reservations - Sets up the ww context and acquires + * the lock on an array of GEM objects. + * + * Once you've locked your reservations, you'll want to set up space + * for your shared fences (if applicable), submit your job, then + * drm_gem_unlock_reservations(). + * + * @objs: drm_gem_objects to lock + * @count: Number of objects in @objs + * @acquire_ctx: struct ww_acquire_ctx that will be initialized as + * part of tracking this set of locked reservations. 
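+ *
+ * A minimal submission sketch (my_add_fences() and my_push_job() are
+ * placeholders for driver-specific work):
+ *
+ *	struct ww_acquire_ctx ctx;
+ *	int ret;
+ *
+ *	ret = drm_gem_lock_reservations(objs, count, &ctx);
+ *	if (ret)
+ *		return ret;
+ *	my_add_fences(objs, count);
+ *	my_push_job(objs, count);
+ *	drm_gem_unlock_reservations(objs, count, &ctx);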
+ */ +int +drm_gem_lock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx) +{ + int contended = -1; + int i, ret; + + ww_acquire_init(acquire_ctx, &reservation_ww_class); + +retry: + if (contended != -1) { + struct drm_gem_object *obj = objs[contended]; + + ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock, + acquire_ctx); + if (ret) { + ww_acquire_done(acquire_ctx); + return ret; + } + } + + for (i = 0; i < count; i++) { + if (i == contended) + continue; + + ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock, + acquire_ctx); + if (ret) { + int j; + + for (j = 0; j < i; j++) + ww_mutex_unlock(&objs[j]->resv->lock); + + if (contended != -1 && contended >= i) + ww_mutex_unlock(&objs[contended]->resv->lock); + + if (ret == -EDEADLK) { + contended = i; + goto retry; + } + + ww_acquire_done(acquire_ctx); + return ret; + } + } + + ww_acquire_done(acquire_ctx); + + return 0; +} +EXPORT_SYMBOL(drm_gem_lock_reservations); + +void +drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx) +{ + int i; + + for (i = 0; i < count; i++) + ww_mutex_unlock(&objs[i]->resv->lock); + + ww_acquire_fini(acquire_ctx); +} +EXPORT_SYMBOL(drm_gem_unlock_reservations); Index: sys/dev/drm/core/drm_hashtab.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_hashtab.c @@ -0,0 +1,211 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Simple open hash tab implementation. 
+ * + * Authors: + * Thomas Hellström + */ + +#include +#include +#include +#ifndef __linux__ +#include +#endif +#include +#include + +#include +#include + +int drm_ht_create(struct drm_open_hash *ht, unsigned int order) +{ + unsigned int size = 1 << order; + + ht->order = order; + ht->table = NULL; + if (size <= PAGE_SIZE / sizeof(*ht->table)) + ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); + else + ht->table = vzalloc(array_size(size, sizeof(*ht->table))); + if (!ht->table) { + DRM_ERROR("Out of memory for hash table\n"); + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL(drm_ht_create); + +void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) +{ + struct drm_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + int count = 0; + + hashed_key = hash_long(key, ht->order); + DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry(entry, h_list, head) + DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); +} + +static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, + unsigned long key) +{ + struct drm_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry(entry, h_list, head) { + if (entry->key == key) + return &entry->head; + if (entry->key > key) + break; + } + return NULL; +} + +static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht, + unsigned long key) +{ + struct drm_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry_rcu(entry, h_list, head) { + if (entry->key == key) + return &entry->head; + if (entry->key > key) + break; + } + return NULL; +} + +int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) +{ + struct drm_hash_item *entry; + struct hlist_head *h_list; + struct hlist_node *parent; + unsigned int hashed_key; + unsigned long key = item->key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + parent = NULL; + hlist_for_each_entry(entry, h_list, head) { + if (entry->key == key) + return -EINVAL; + if (entry->key > key) + break; + parent = &entry->head; + } + if (parent) { + hlist_add_behind_rcu(&item->head, parent); + } else { + hlist_add_head_rcu(&item->head, h_list); + } + return 0; +} +EXPORT_SYMBOL(drm_ht_insert_item); + +/* + * Just insert an item and return any "bits" bit key that hasn't been + * used before. 
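+ *
+ * The first candidate key is hash_long(seed, bits); on collision the
+ * remaining values are probed linearly (mod 1 << bits) until the probe
+ * wraps around, so -EINVAL is only returned once the whole key space
+ * is in use.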
+ */ +int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add) +{ + int ret; + unsigned long mask = (1UL << bits) - 1; + unsigned long first, unshifted_key; + + unshifted_key = hash_long(seed, bits); + first = unshifted_key; + do { + item->key = (unshifted_key << shift) + add; + ret = drm_ht_insert_item(ht, item); + if (ret) + unshifted_key = (unshifted_key + 1) & mask; + } while(ret && (unshifted_key != first)); + + if (ret) { + DRM_ERROR("Available key bit space exhausted\n"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(drm_ht_just_insert_please); + +int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, + struct drm_hash_item **item) +{ + struct hlist_node *list; + + list = drm_ht_find_key_rcu(ht, key); + if (!list) + return -EINVAL; + + *item = hlist_entry(list, struct drm_hash_item, head); + return 0; +} +EXPORT_SYMBOL(drm_ht_find_item); + +int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) +{ + struct hlist_node *list; + + list = drm_ht_find_key(ht, key); + if (list) { + hlist_del_init_rcu(list); + return 0; + } + return -EINVAL; +} + +int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) +{ + hlist_del_init_rcu(&item->head); + return 0; +} +EXPORT_SYMBOL(drm_ht_remove_item); + +void drm_ht_remove(struct drm_open_hash *ht) +{ + if (ht->table) { + kvfree(ht->table); + ht->table = NULL; + } +} +EXPORT_SYMBOL(drm_ht_remove); Index: sys/dev/drm/core/drm_internal.h =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_internal.h @@ -0,0 +1,215 @@ +/* + * Copyright © 2014 Intel Corporation + * Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */
+
+#include <drm/drm_util.h>
+
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 4
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+
+struct dentry;
+struct dma_buf;
+struct drm_connector;
+struct drm_crtc;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_master;
+struct drm_minor;
+struct drm_prime_file_private;
+struct drm_printer;
+
+/* drm_file.c */
+extern struct mutex drm_global_mutex;
+struct drm_file *drm_file_alloc(struct drm_minor *minor);
+void drm_file_free(struct drm_file *file);
+void drm_lastclose(struct drm_device *dev);
+
+/* drm_pci.c */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv);
+void drm_pci_agp_destroy(struct drm_device *dev);
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+
+/* drm_prime.c */
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+					struct dma_buf *dma_buf);
+
+/* drm_drv.c */
+struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
+/* drm_vblank.c */
+void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
+void drm_vblank_cleanup(struct drm_device *dev);
+
+/* IOCTLS */
+int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+
+/* drm_irq.c */
+
+/* IOCTLS */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+int drm_legacy_irq_control(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+#endif
+
+int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+
+int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *filp);
+
+/* drm_auth.c */
+int drm_getmagic(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv);
+int drm_authmagic(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv);
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int drm_master_open(struct drm_file *file_priv);
+void drm_master_release(struct drm_file *file_priv);
+bool drm_master_internal_acquire(struct drm_device *dev);
+void drm_master_internal_release(struct drm_device *dev);
+
+/* drm_sysfs.c */
+extern struct class *drm_class;
+
+int drm_sysfs_init(void);
+void drm_sysfs_destroy(void);
+#ifdef __linux__
+struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
+#elif defined(__FreeBSD__)
+struct cdev *drm_sysfs_minor_alloc(struct drm_minor *minor);
+#endif
+int drm_sysfs_connector_add(struct drm_connector *connector);
+void drm_sysfs_connector_remove(struct drm_connector *connector);
+
+void drm_sysfs_lease_event(struct drm_device *dev);
+
+/* drm_gem.c */
+struct drm_gem_object;
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
+int drm_gem_handle_create_tail(struct drm_file *file_priv,
+			       struct drm_gem_object *obj,
+			       u32 *handlep);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int
drm_gem_flink_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_gem_open_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); +void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); +void drm_gem_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + +int drm_gem_pin(struct drm_gem_object *obj); +void drm_gem_unpin(struct drm_gem_object *obj); +void *drm_gem_vmap(struct drm_gem_object *obj); +void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr); + +/* drm_debugfs.c drm_debugfs_crc.c */ +#if defined(CONFIG_DEBUG_FS) +int drm_debugfs_init(struct drm_minor *minor, int minor_id, + struct dentry *root); +void drm_debugfs_cleanup(struct drm_minor *minor); +void drm_debugfs_connector_add(struct drm_connector *connector); +void drm_debugfs_connector_remove(struct drm_connector *connector); +void drm_debugfs_crtc_add(struct drm_crtc *crtc); +void drm_debugfs_crtc_remove(struct drm_crtc *crtc); +void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc); +#else +static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id, + struct dentry *root) +{ + return 0; +} + +static inline void drm_debugfs_cleanup(struct drm_minor *minor) +{ +} + +static inline void drm_debugfs_connector_add(struct drm_connector *connector) +{ +} +static inline void drm_debugfs_connector_remove(struct drm_connector *connector) +{ +} + +static inline void drm_debugfs_crtc_add(struct drm_crtc *crtc) +{ +} +static inline void drm_debugfs_crtc_remove(struct drm_crtc *crtc) +{ +} + +static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc) +{ +} + +#endif + +drm_ioctl_t drm_version; +drm_ioctl_t drm_getunique; +drm_ioctl_t drm_getclient; + +/* drm_syncobj.c */ +void drm_syncobj_open(struct drm_file *file_private); +void drm_syncobj_release(struct drm_file *file_private); +int drm_syncobj_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); + +/* drm_framebuffer.c */ +void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_framebuffer *fb); +int drm_framebuffer_debugfs_init(struct drm_minor *minor); + +/* drm_hdcp.c */ +int drm_setup_hdcp_srm(struct class *drm_class); +void drm_teardown_hdcp_srm(struct class *drm_class); Index: sys/dev/drm/core/drm_ioc32.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_ioc32.c @@ -0,0 +1,1013 @@ +/* + * \file drm_ioc32.c + * + * 32-bit ioctl compatibility routines for the DRM. + * + * \author Paul Mackerras + * + * Copyright (C) Paul Mackerras 2005. + * All Rights Reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* FBSD: Linux header pollution, we need wait.h and fs.h */
+#include <linux/wait.h>
+#include <linux/fs.h>
+
+#include <linux/compat.h>
+#include <linux/ratelimit.h>
+#include <linux/export.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+#include "drm_legacy.h"
+
+#ifdef __FreeBSD__
+#define	compat_ptr(x)		((void *)(unsigned long)x)
+#define	ptr_to_compat(x)	((unsigned long)x)
+#endif
+
+#define DRM_IOCTL_VERSION32		DRM_IOWR(0x00, drm_version32_t)
+#define DRM_IOCTL_GET_UNIQUE32		DRM_IOWR(0x01, drm_unique32_t)
+#define DRM_IOCTL_GET_MAP32		DRM_IOWR(0x04, drm_map32_t)
+#define DRM_IOCTL_GET_CLIENT32		DRM_IOWR(0x05, drm_client32_t)
+#define DRM_IOCTL_GET_STATS32		DRM_IOR( 0x06, drm_stats32_t)
+
+#define DRM_IOCTL_SET_UNIQUE32		DRM_IOW( 0x10, drm_unique32_t)
+#define DRM_IOCTL_ADD_MAP32		DRM_IOWR(0x15, drm_map32_t)
+#define DRM_IOCTL_ADD_BUFS32		DRM_IOWR(0x16, drm_buf_desc32_t)
+#define DRM_IOCTL_MARK_BUFS32		DRM_IOW( 0x17, drm_buf_desc32_t)
+#define DRM_IOCTL_INFO_BUFS32		DRM_IOWR(0x18, drm_buf_info32_t)
+#define DRM_IOCTL_MAP_BUFS32		DRM_IOWR(0x19, drm_buf_map32_t)
+#define DRM_IOCTL_FREE_BUFS32		DRM_IOW( 0x1a, drm_buf_free32_t)
+
+#define DRM_IOCTL_RM_MAP32		DRM_IOW( 0x1b, drm_map32_t)
+
+#define DRM_IOCTL_SET_SAREA_CTX32	DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
+#define DRM_IOCTL_GET_SAREA_CTX32	DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
+
+#define DRM_IOCTL_RES_CTX32		DRM_IOWR(0x26, drm_ctx_res32_t)
+#define DRM_IOCTL_DMA32			DRM_IOWR(0x29, drm_dma32_t)
+
+#define DRM_IOCTL_AGP_ENABLE32		DRM_IOW( 0x32, drm_agp_mode32_t)
+#define DRM_IOCTL_AGP_INFO32		DRM_IOR( 0x33, drm_agp_info32_t)
+#define DRM_IOCTL_AGP_ALLOC32		DRM_IOWR(0x34, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_FREE32		DRM_IOW( 0x35, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_BIND32		DRM_IOW( 0x36, drm_agp_binding32_t)
+#define DRM_IOCTL_AGP_UNBIND32		DRM_IOW( 0x37, drm_agp_binding32_t)
+
+#define DRM_IOCTL_SG_ALLOC32		DRM_IOW( 0x38, drm_scatter_gather32_t)
+#define DRM_IOCTL_SG_FREE32		DRM_IOW( 0x39, drm_scatter_gather32_t)
+
+#define DRM_IOCTL_UPDATE_DRAW32		DRM_IOW( 0x3f, drm_update_draw32_t)
+
+#define DRM_IOCTL_WAIT_VBLANK32		DRM_IOWR(0x3a, drm_wait_vblank32_t)
+
+#define DRM_IOCTL_MODE_ADDFB232		DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
+
+typedef struct drm_version_32 {
+	int version_major;	/* Major version */
+	int version_minor;	/* Minor version */
+	int version_patchlevel;	/* Patch level */
+	u32 name_len;		/* Length of name buffer */
+	u32 name;		/* Name of driver */
+	u32 date_len;		/* Length of date
buffer */ + u32 date; /* User-space buffer to hold date */ + u32 desc_len; /* Length of desc buffer */ + u32 desc; /* User-space buffer to hold desc */ +} drm_version32_t; + +static int compat_drm_version(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_version32_t v32; + struct drm_version v; + int err; + + if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) + return -EFAULT; + + v = (struct drm_version) { + .name_len = v32.name_len, + .name = compat_ptr(v32.name), + .date_len = v32.date_len, + .date = compat_ptr(v32.date), + .desc_len = v32.desc_len, + .desc = compat_ptr(v32.desc), + }; + err = drm_ioctl_kernel(file, drm_version, &v, + DRM_UNLOCKED|DRM_RENDER_ALLOW); + if (err) + return err; + + v32.version_major = v.version_major; + v32.version_minor = v.version_minor; + v32.version_patchlevel = v.version_patchlevel; + v32.name_len = v.name_len; + v32.date_len = v.date_len; + v32.desc_len = v.desc_len; + if (copy_to_user((void __user *)arg, &v32, sizeof(v32))) + return -EFAULT; + return 0; +} + +typedef struct drm_unique32 { + u32 unique_len; /* Length of unique */ + u32 unique; /* Unique name for driver instantiation */ +} drm_unique32_t; + +static int compat_drm_getunique(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_unique32_t uq32; + struct drm_unique uq; + int err; + + if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) + return -EFAULT; + uq = (struct drm_unique){ + .unique_len = uq32.unique_len, + .unique = compat_ptr(uq32.unique), + }; + + err = drm_ioctl_kernel(file, drm_getunique, &uq, DRM_UNLOCKED); + if (err) + return err; + + uq32.unique_len = uq.unique_len; + if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32))) + return -EFAULT; + return 0; +} + +static int compat_drm_setunique(struct file *file, unsigned int cmd, + unsigned long arg) +{ + /* it's dead */ + return -EINVAL; +} + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +typedef struct drm_map32 { + u32 offset; /* Requested physical address (0 for SAREA) */ + u32 size; /* Requested physical size (bytes) */ + enum drm_map_type type; /* Type of memory to map */ + enum drm_map_flags flags; /* Flags */ + u32 handle; /* User-space: "Handle" to pass to mmap() */ + int mtrr; /* MTRR slot used */ +} drm_map32_t; + +static int compat_drm_getmap(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_map32_t __user *argp = (void __user *)arg; + drm_map32_t m32; + struct drm_map map; + int err; + + if (copy_from_user(&m32, argp, sizeof(m32))) + return -EFAULT; + + map.offset = m32.offset; + err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, DRM_UNLOCKED); + if (err) + return err; + + m32.offset = map.offset; + m32.size = map.size; + m32.type = map.type; + m32.flags = map.flags; + m32.handle = ptr_to_compat((void __user *)map.handle); + m32.mtrr = map.mtrr; + if (copy_to_user(argp, &m32, sizeof(m32))) + return -EFAULT; + return 0; + +} + +static int compat_drm_addmap(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_map32_t __user *argp = (void __user *)arg; + drm_map32_t m32; + struct drm_map map; + int err; + + if (copy_from_user(&m32, argp, sizeof(m32))) + return -EFAULT; + + map.offset = m32.offset; + map.size = m32.size; + map.type = m32.type; + map.flags = m32.flags; + + err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); + if (err) + return err; + + m32.offset = map.offset; + m32.mtrr = map.mtrr; + m32.handle = ptr_to_compat((void __user *)map.handle); + if (map.handle != 
compat_ptr(m32.handle)) + pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", + map.handle, m32.type, m32.offset); + + if (copy_to_user(argp, &m32, sizeof(m32))) + return -EFAULT; + + return 0; +} + +static int compat_drm_rmmap(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_map32_t __user *argp = (void __user *)arg; + struct drm_map map; + u32 handle; + + if (get_user(handle, &argp->handle)) + return -EFAULT; + map.handle = compat_ptr(handle); + return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH); +} +#endif + +typedef struct drm_client32 { + int idx; /* Which client desired? */ + int auth; /* Is client authenticated? */ + u32 pid; /* Process ID */ + u32 uid; /* User ID */ + u32 magic; /* Magic */ + u32 iocs; /* Ioctl count */ +} drm_client32_t; + +static int compat_drm_getclient(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_client32_t c32; + drm_client32_t __user *argp = (void __user *)arg; + struct drm_client client; + int err; + + if (copy_from_user(&c32, argp, sizeof(c32))) + return -EFAULT; + + client.idx = c32.idx; + + err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED); + if (err) + return err; + + c32.idx = client.idx; + c32.auth = client.auth; + c32.pid = client.pid; + c32.uid = client.uid; + c32.magic = client.magic; + c32.iocs = client.iocs; + + if (copy_to_user(argp, &c32, sizeof(c32))) + return -EFAULT; + return 0; +} + +typedef struct drm_stats32 { + u32 count; + struct { + u32 value; + enum drm_stat_type type; + } data[15]; +} drm_stats32_t; + +static int compat_drm_getstats(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_stats32_t __user *argp = (void __user *)arg; + int err; + + err = drm_ioctl_kernel(file, drm_noop, NULL, DRM_UNLOCKED); + if (err) + return err; + + if (clear_user(argp, sizeof(drm_stats32_t))) + return -EFAULT; + return 0; +} + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +typedef struct drm_buf_desc32 { + int count; /* Number of buffers of this size */ + int size; /* Size in bytes */ + int low_mark; /* Low water mark */ + int high_mark; /* High water mark */ + int flags; + u32 agp_start; /* Start address in the AGP aperture */ +} drm_buf_desc32_t; + +static int compat_drm_addbufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_desc32_t __user *argp = (void __user *)arg; + drm_buf_desc32_t desc32; + struct drm_buf_desc desc; + int err; + + if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t))) + return -EFAULT; + + desc = (struct drm_buf_desc){ + desc32.count, desc32.size, desc32.low_mark, desc32.high_mark, + desc32.flags, desc32.agp_start + }; + + err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); + if (err) + return err; + + desc32 = (drm_buf_desc32_t){ + desc.count, desc.size, desc.low_mark, desc.high_mark, + desc.flags, desc.agp_start + }; + if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t))) + return -EFAULT; + + return 0; +} + +static int compat_drm_markbufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_desc32_t b32; + drm_buf_desc32_t __user *argp = (void __user *)arg; + struct drm_buf_desc buf; + + if (copy_from_user(&b32, argp, sizeof(b32))) + return -EFAULT; + + buf.size = b32.size; + buf.low_mark = b32.low_mark; + buf.high_mark = b32.high_mark; + + return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); +} + +typedef struct drm_buf_info32 { + int count; /**< Entries in list */ + u32 list; +} 
drm_buf_info32_t; + +static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from) +{ + drm_buf_info32_t *request = data; + drm_buf_desc32_t __user *to = compat_ptr(request->list); + drm_buf_desc32_t v = {.count = from->buf_count, + .size = from->buf_size, + .low_mark = from->low_mark, + .high_mark = from->high_mark}; + + if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags))) + return -EFAULT; + return 0; +} + +static int drm_legacy_infobufs32(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_buf_info32_t *request = data; + return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32); +} + +static int compat_drm_infobufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_info32_t req32; + drm_buf_info32_t __user *argp = (void __user *)arg; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + if (req32.count < 0) + req32.count = 0; + + err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH); + if (err) + return err; + + if (put_user(req32.count, &argp->count)) + return -EFAULT; + + return 0; +} + +typedef struct drm_buf_pub32 { + int idx; /**< Index into the master buffer list */ + int total; /**< Buffer size */ + int used; /**< Amount of buffer in use (for DMA) */ + u32 address; /**< Address of buffer */ +} drm_buf_pub32_t; + +typedef struct drm_buf_map32 { + int count; /**< Length of the buffer list */ + u32 virtual; /**< Mmap'd area in user-virtual */ + u32 list; /**< Buffer information */ +} drm_buf_map32_t; + +static int map_one_buf32(void *data, int idx, unsigned long virtual, + struct drm_buf *buf) +{ + drm_buf_map32_t *request = data; + drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx; + drm_buf_pub32_t v; + + v.idx = buf->idx; + v.total = buf->total; + v.used = 0; + v.address = virtual + buf->offset; + if (copy_to_user(to, &v, sizeof(v))) + return -EFAULT; + return 0; +} + +static int drm_legacy_mapbufs32(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_buf_map32_t *request = data; + void __user *v; + int err = __drm_legacy_mapbufs(dev, data, &request->count, + &v, map_one_buf32, + file_priv); + request->virtual = ptr_to_compat(v); + return err; +} + +static int compat_drm_mapbufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_map32_t __user *argp = (void __user *)arg; + drm_buf_map32_t req32; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + if (req32.count < 0) + return -EINVAL; + + err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH); + if (err) + return err; + + if (put_user(req32.count, &argp->count) + || put_user(req32.virtual, &argp->virtual)) + return -EFAULT; + + return 0; +} + +typedef struct drm_buf_free32 { + int count; + u32 list; +} drm_buf_free32_t; + +static int compat_drm_freebufs(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_buf_free32_t req32; + struct drm_buf_free request; + drm_buf_free32_t __user *argp = (void __user *)arg; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request.count = req32.count; + request.list = compat_ptr(req32.list); + return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH); +} + +typedef struct drm_ctx_priv_map32 { + unsigned int ctx_id; /**< Context requesting private mapping */ + u32 handle; /**< Handle of map */ +} drm_ctx_priv_map32_t; + +static int compat_drm_setsareactx(struct file *file, unsigned int cmd, + unsigned long arg) 
+{ + drm_ctx_priv_map32_t req32; + struct drm_ctx_priv_map request; + drm_ctx_priv_map32_t __user *argp = (void __user *)arg; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request.ctx_id = req32.ctx_id; + request.handle = compat_ptr(req32.handle); + return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); +} + +static int compat_drm_getsareactx(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct drm_ctx_priv_map req; + drm_ctx_priv_map32_t req32; + drm_ctx_priv_map32_t __user *argp = (void __user *)arg; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + req.ctx_id = req32.ctx_id; + err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH); + if (err) + return err; + + req32.handle = ptr_to_compat((void __user *)req.handle); + if (copy_to_user(argp, &req32, sizeof(req32))) + return -EFAULT; + + return 0; +} + +typedef struct drm_ctx_res32 { + int count; + u32 contexts; +} drm_ctx_res32_t; + +static int compat_drm_resctx(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_ctx_res32_t __user *argp = (void __user *)arg; + drm_ctx_res32_t res32; + struct drm_ctx_res res; + int err; + + if (copy_from_user(&res32, argp, sizeof(res32))) + return -EFAULT; + + res.count = res32.count; + res.contexts = compat_ptr(res32.contexts); + err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH); + if (err) + return err; + + res32.count = res.count; + if (copy_to_user(argp, &res32, sizeof(res32))) + return -EFAULT; + + return 0; +} + +typedef struct drm_dma32 { + int context; /**< Context handle */ + int send_count; /**< Number of buffers to send */ + u32 send_indices; /**< List of handles to buffers */ + u32 send_sizes; /**< Lengths of data to send */ + enum drm_dma_flags flags; /**< Flags */ + int request_count; /**< Number of buffers requested */ + int request_size; /**< Desired size for buffers */ + u32 request_indices; /**< Buffer information */ + u32 request_sizes; + int granted_count; /**< Number of buffers granted */ +} drm_dma32_t; + +static int compat_drm_dma(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_dma32_t d32; + drm_dma32_t __user *argp = (void __user *)arg; + struct drm_dma d; + int err; + + if (copy_from_user(&d32, argp, sizeof(d32))) + return -EFAULT; + + d.context = d32.context; + d.send_count = d32.send_count; + d.send_indices = compat_ptr(d32.send_indices); + d.send_sizes = compat_ptr(d32.send_sizes); + d.flags = d32.flags; + d.request_count = d32.request_count; + d.request_indices = compat_ptr(d32.request_indices); + d.request_sizes = compat_ptr(d32.request_sizes); + err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH); + if (err) + return err; + + if (put_user(d.request_size, &argp->request_size) + || put_user(d.granted_count, &argp->granted_count)) + return -EFAULT; + + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_AGP) +typedef struct drm_agp_mode32 { + u32 mode; /**< AGP mode */ +} drm_agp_mode32_t; + +static int compat_drm_agp_enable(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_mode32_t __user *argp = (void __user *)arg; + struct drm_agp_mode mode; + + if (get_user(mode.mode, &argp->mode)) + return -EFAULT; + + return drm_ioctl_kernel(file, drm_agp_enable_ioctl, &mode, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); +} + +typedef struct drm_agp_info32 { + int agp_version_major; + int agp_version_minor; + u32 mode; + u32 aperture_base; /* physical address */ + u32 
aperture_size; /* bytes */ + u32 memory_allowed; /* bytes */ + u32 memory_used; + + /* PCI information */ + unsigned short id_vendor; + unsigned short id_device; +} drm_agp_info32_t; + +static int compat_drm_agp_info(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_info32_t __user *argp = (void __user *)arg; + drm_agp_info32_t i32; + struct drm_agp_info info; + int err; + + err = drm_ioctl_kernel(file, drm_agp_info_ioctl, &info, DRM_AUTH); + if (err) + return err; + + i32.agp_version_major = info.agp_version_major; + i32.agp_version_minor = info.agp_version_minor; + i32.mode = info.mode; + i32.aperture_base = info.aperture_base; + i32.aperture_size = info.aperture_size; + i32.memory_allowed = info.memory_allowed; + i32.memory_used = info.memory_used; + i32.id_vendor = info.id_vendor; + i32.id_device = info.id_device; + if (copy_to_user(argp, &i32, sizeof(i32))) + return -EFAULT; + + return 0; +} + +typedef struct drm_agp_buffer32 { + u32 size; /**< In bytes -- will round to page boundary */ + u32 handle; /**< Used for binding / unbinding */ + u32 type; /**< Type of memory to allocate */ + u32 physical; /**< Physical used by i810 */ +} drm_agp_buffer32_t; + +static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_buffer32_t __user *argp = (void __user *)arg; + drm_agp_buffer32_t req32; + struct drm_agp_buffer request; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request.size = req32.size; + request.type = req32.type; + err = drm_ioctl_kernel(file, drm_agp_alloc_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); + if (err) + return err; + + req32.handle = request.handle; + req32.physical = request.physical; + if (copy_to_user(argp, &req32, sizeof(req32))) { + drm_ioctl_kernel(file, drm_agp_free_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); + return -EFAULT; + } + + return 0; +} + +static int compat_drm_agp_free(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_buffer32_t __user *argp = (void __user *)arg; + struct drm_agp_buffer request; + + if (get_user(request.handle, &argp->handle)) + return -EFAULT; + + return drm_ioctl_kernel(file, drm_agp_free_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); +} + +typedef struct drm_agp_binding32 { + u32 handle; /**< From drm_agp_buffer */ + u32 offset; /**< In bytes -- will round to page boundary */ +} drm_agp_binding32_t; + +static int compat_drm_agp_bind(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_binding32_t __user *argp = (void __user *)arg; + drm_agp_binding32_t req32; + struct drm_agp_binding request; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + request.handle = req32.handle; + request.offset = req32.offset; + return drm_ioctl_kernel(file, drm_agp_bind_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); +} + +static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_agp_binding32_t __user *argp = (void __user *)arg; + struct drm_agp_binding request; + + if (get_user(request.handle, &argp->handle)) + return -EFAULT; + + return drm_ioctl_kernel(file, drm_agp_unbind_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); +} +#endif /* CONFIG_AGP */ + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +typedef struct drm_scatter_gather32 { + u32 size; /**< In bytes -- will round to page boundary */ + u32 handle; /**< Used for mapping / unmapping */ +} drm_scatter_gather32_t; + +static int compat_drm_sg_alloc(struct 
file *file, unsigned int cmd, + unsigned long arg) +{ + drm_scatter_gather32_t __user *argp = (void __user *)arg; + struct drm_scatter_gather request; + int err; + + if (get_user(request.size, &argp->size)) + return -EFAULT; + + err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); + if (err) + return err; + + /* XXX not sure about the handle conversion here... */ + if (put_user(request.handle >> PAGE_SHIFT, &argp->handle)) + return -EFAULT; + + return 0; +} + +static int compat_drm_sg_free(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_scatter_gather32_t __user *argp = (void __user *)arg; + struct drm_scatter_gather request; + unsigned long x; + + if (get_user(x, &argp->handle)) + return -EFAULT; + request.handle = x << PAGE_SHIFT; + return drm_ioctl_kernel(file, drm_legacy_sg_free, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); +} +#endif +#if defined(CONFIG_X86) +typedef struct drm_update_draw32 { + drm_drawable_t handle; + unsigned int type; + unsigned int num; + /* 64-bit version has a 32-bit pad here */ + u64 data; /**< Pointer */ +} __attribute__((packed)) drm_update_draw32_t; + +static int compat_drm_update_draw(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_update_draw32_t update32; + if (copy_from_user(&update32, (void __user *)arg, sizeof(update32))) + return -EFAULT; + + return drm_ioctl_kernel(file, drm_noop, NULL, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); +} +#endif + +struct drm_wait_vblank_request32 { + enum drm_vblank_seq_type type; + unsigned int sequence; + u32 signal; +}; + +struct drm_wait_vblank_reply32 { + enum drm_vblank_seq_type type; + unsigned int sequence; + s32 tval_sec; + s32 tval_usec; +}; + +typedef union drm_wait_vblank32 { + struct drm_wait_vblank_request32 request; + struct drm_wait_vblank_reply32 reply; +} drm_wait_vblank32_t; + +static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_wait_vblank32_t __user *argp = (void __user *)arg; + drm_wait_vblank32_t req32; + union drm_wait_vblank req; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + req.request.type = req32.request.type; + req.request.sequence = req32.request.sequence; + req.request.signal = req32.request.signal; + err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED); + if (err) + return err; + + req32.reply.type = req.reply.type; + req32.reply.sequence = req.reply.sequence; + req32.reply.tval_sec = req.reply.tval_sec; + req32.reply.tval_usec = req.reply.tval_usec; + if (copy_to_user(argp, &req32, sizeof(req32))) + return -EFAULT; + + return 0; +} + +#if defined(CONFIG_X86) +typedef struct drm_mode_fb_cmd232 { + u32 fb_id; + u32 width; + u32 height; + u32 pixel_format; + u32 flags; + u32 handles[4]; + u32 pitches[4]; + u32 offsets[4]; + u64 modifier[4]; +} __attribute__((packed)) drm_mode_fb_cmd232_t; + +static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg; + struct drm_mode_fb_cmd2 req64; + int err; + + if (copy_from_user(&req64, argp, + offsetof(drm_mode_fb_cmd232_t, modifier))) + return -EFAULT; + + if (copy_from_user(&req64.modifier, &argp->modifier, + sizeof(req64.modifier))) + return -EFAULT; + + err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, + DRM_UNLOCKED); + if (err) + return err; + + if (put_user(req64.fb_id, &argp->fb_id)) + return -EFAULT; + + return 0; +} +#endif + +static struct { + 
drm_ioctl_compat_t *fn;
+	char *name;
+} drm_compat_ioctls[] = {
+#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
+	DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
+	DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+	DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
+#endif
+	DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
+	DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
+	DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+	DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
+	DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
+	DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
+	DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs),
+	DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs),
+	DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs),
+	DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap),
+	DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx),
+	DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
+	DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
+	DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
+#endif
+#if IS_ENABLED(CONFIG_AGP)
+	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
+	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
+	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc),
+	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free),
+	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
+	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
+#endif
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+	DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
+	DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
+#endif
+#if defined(CONFIG_X86)
+	DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
+#endif
+	DRM_IOCTL32_DEF(DRM_IOCTL_WAIT_VBLANK, compat_drm_wait_vblank),
+#if defined(CONFIG_X86)
+	DRM_IOCTL32_DEF(DRM_IOCTL_MODE_ADDFB2, compat_drm_mode_addfb2),
+#endif
+};
+
+/**
+ * drm_compat_ioctl - 32bit IOCTL compatibility handler for DRM drivers
+ * @filp: file this ioctl is called on
+ * @cmd: ioctl cmd number
+ * @arg: user argument
+ *
+ * Compatibility handler for 32 bit userspace running on 64 bit kernels. All
+ * actual IOCTL handling is forwarded to drm_ioctl(), while marshalling
+ * structures as appropriate. Note that this only handles DRM core IOCTLs; if
+ * the driver has botched IOCTLs of its own, it must handle those by wrapping
+ * this function.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+#ifdef __linux__
+	struct drm_file *file_priv = filp->private_data;
+#endif
+	drm_ioctl_compat_t *fn;
+	int ret;
+
+	/* Assume that ioctls without an explicit compat routine will just
+	 * work. This may not always be a good assumption, but it's better
+	 * than always failing.
+	 */
+	if (nr >= ARRAY_SIZE(drm_compat_ioctls))
+		return drm_ioctl(filp, cmd, arg);
+
+	fn = drm_compat_ioctls[nr].fn;
+	if (!fn)
+		return drm_ioctl(filp, cmd, arg);
+
+#ifdef __linux__
+	DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+		  task_pid_nr(current),
+		  (long)old_encode_dev(file_priv->minor->kdev->devt),
+		  file_priv->authenticated,
+		  drm_compat_ioctls[nr].name);
+#endif
+	ret = (*fn)(filp, cmd, arg);
+	if (ret)
+		DRM_DEBUG("ret = %d\n", ret);
+	return ret;
+}
+EXPORT_SYMBOL(drm_compat_ioctl);
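The kernel-doc above says that a driver with botched (32-bit-unsafe) IOCTLs of its own must handle them by wrapping this function. A minimal driver-side sketch of that wrapping follows; it is not part of this patch, and all my_driver_* names are invented for illustration:

#include <drm/drm_ioctl.h>

/* Hypothetical sketch: a driver wrapping drm_compat_ioctl(), as the
 * kernel-doc above requires. All my_driver_* names are invented. */
static drm_ioctl_compat_t *my_driver_compat_ioctls[] = {
	/* [DRM_IOCTL_NR(DRM_IOCTL_MY_DRIVER_OPERATION32) - DRM_COMMAND_BASE]
	 *	= compat_my_driver_operation, */
};

long my_driver_compat_ioctl(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;

	/* Driver range: check our private compat table first. */
	if (nr >= DRM_COMMAND_BASE &&
	    nr < DRM_COMMAND_BASE + ARRAY_SIZE(my_driver_compat_ioctls))
		fn = my_driver_compat_ioctls[nr - DRM_COMMAND_BASE];
	if (fn)
		return fn(filp, cmd, arg);

	/* Everything else, including all core ioctls, goes to the DRM core. */
	return drm_compat_ioctl(filp, cmd, arg);
}

This mirrors the pattern used by drivers such as i915 and radeon: only the driver-private range gets special marshalling, and the core fallback stays in one place.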
Index: sys/dev/drm/core/drm_ioctl.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_ioctl.c
@@ -0,0 +1,959 @@
+/*
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* FBSD: Linux header pollution, we need fs.h and ioctl.h */
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+
+#include <linux/export.h>
+#include <linux/nospec.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_print.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+#include "drm_legacy.h"
+
+/**
+ * DOC: getunique and setversion story
+ *
+ * BEWARE THE DRAGONS! MIND THE TRAPDOORS!
+ *
+ * In an attempt to warn anyone else who's trying to figure out what's going
+ * on here, I'll try to summarize the story. First things first, let's clear up
+ * the names, because the kernel internals, libdrm and the ioctls are all named
+ * differently:
+ *
+ * - GET_UNIQUE ioctl, implemented by drm_getunique is wrapped up in libdrm
+ *   through the drmGetBusid function.
+ * - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All
+ *   that code is nerved in the kernel with drm_invalid_op().
+ * - The internal set_busid kernel functions and driver callbacks are
+ *   exclusively used by the SET_VERSION ioctl, because only drm 1.0 (which is
+ *   nerved) allowed userspace to set the busid through the above ioctl.
+ * - Other ioctls and functions involved are named consistently.
+ *
+ * For anyone wondering what's the difference between drm 1.1 and 1.4: Correctly
+ * handling pci domains in the busid on ppc. Doing this correctly was only
+ * implemented in libdrm in 2010, hence can't be nerved yet. No one knows what's
+ * special with drm 1.2 and 1.3.
+ *
+ * Now the actual horror story of how device lookup in drm works. At large,
+ * there are two different ways, either by busid, or by device driver name.
+ *
+ * Opening by busid is fairly simple:
+ *
+ * 1. First call SET_VERSION to make sure pci domains are handled properly. As a
+ *    side-effect this fills out the unique name in the master structure.
+ * 2. Call GET_UNIQUE to read out the unique name from the master structure,
+ *    which matches the busid thanks to step 1. If it doesn't, proceed to try
+ *    the next device node.
+ *
+ * Opening by name is slightly different:
+ *
+ * 1. Directly call VERSION to get the version and to match against the driver
+ *    name returned by that ioctl. Note that SET_VERSION is not called, which
+ *    means the unique name for the master node just opened is _not_ filled
+ *    out. This despite the fact that with current drm, device nodes are always
+ *    bound to one device, and can't be runtime assigned like with drm 1.0.
+ * 2. Match driver name. If it mismatches, proceed to the next device node.
+ * 3. Call GET_UNIQUE, and check whether the unique name has length zero (by
+ *    checking that the first byte in the string is 0). If that's not the case
+ *    libdrm skips and proceeds to the next device node. Probably this is just
+ *    copypasta from drm 1.0 times where a set unique name meant that the driver
+ *    was in use already, but that's just conjecture.
+ *
+ * Long story short: To keep the open by name logic working, GET_UNIQUE must
+ * _not_ return a unique string when SET_VERSION hasn't been called yet,
+ * otherwise libdrm breaks. Even when that unique string can't ever change, and
+ * is totally irrelevant for actually opening the device, because runtime
+ * assignable device instances were only supported in drm 1.0, which is long
+ * dead. But the libdrm code in drmOpenByName somehow survived, hence this
+ * can't be broken.
+ */
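To make the open-by-busid dance above concrete, here is a hedged userspace sketch of the probe step for one device node. It is illustrative only: the function name is invented, and libdrm's real drmOpenByBusid() does considerably more error handling and node iteration.

/* Hypothetical userspace sketch of the open-by-busid steps above. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <drm/drm.h>

int open_node_if_busid_matches(const char *node, const char *busid)
{
	struct drm_set_version sv = {
		.drm_di_major = 1, .drm_di_minor = 4,	/* want PCI domains */
		.drm_dd_major = -1, .drm_dd_minor = -1,	/* don't care */
	};
	struct drm_unique u = { 0 };
	char name[128] = { 0 };
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;

	/* Step 1: SET_VERSION fills out the unique (bus id) name. */
	if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv) != 0)
		goto fail;

	/* Step 2: GET_UNIQUE reads it back for comparison. */
	u.unique = name;
	u.unique_len = sizeof(name) - 1;
	if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u) != 0)
		goto fail;

	if (strcmp(name, busid) == 0)
		return fd;	/* match: caller owns the fd */

fail:
	close(fd);
	return -1;
}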
+
+/*
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
+ */
+int drm_getunique(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_unique *u = data;
+	struct drm_master *master = file_priv->master;
+
+	mutex_lock(&master->dev->master_mutex);
+	if (u->unique_len >= master->unique_len) {
+		if (copy_to_user(u->unique, master->unique, master->unique_len)) {
+			mutex_unlock(&master->dev->master_mutex);
+			return -EFAULT;
+		}
+	}
+	u->unique_len = master->unique_len;
+	mutex_unlock(&master->dev->master_mutex);
+
+	return 0;
+}
+
+static void
+drm_unset_busid(struct drm_device *dev,
+		struct drm_master *master)
+{
+	kfree(master->unique);
+	master->unique = NULL;
+	master->unique_len = 0;
+}
+
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_master *master = file_priv->master;
+#if defined(__linux__) || defined(CONFIG_PCI)
+	int ret;
+#endif
+
+	if (master->unique != NULL)
+		drm_unset_busid(dev, master);
+
+#if defined(__linux__) || defined(CONFIG_PCI)
+#ifdef __linux__
+	if (dev->dev && dev_is_pci(dev->dev)) {
+#else
+	// BSDFIXME: Assume it's PCI for now
+	if (dev->dev) {
+#endif
+		ret = drm_pci_set_busid(dev, master);
+		if (ret) {
+			drm_unset_busid(dev, master);
+			return ret;
+		}
+	} else {
+		WARN_ON(!dev->unique);
+		master->unique = kstrdup(dev->unique, GFP_KERNEL);
+		if (master->unique)
+			master->unique_len = strlen(dev->unique);
+	}
+#else
+	WARN_ON(!dev->unique);
+	master->unique = kstrdup(dev->unique, GFP_KERNEL);
+	if (master->unique)
+		master->unique_len = strlen(dev->unique);
+#endif
+
+	return 0;
+}
+
+/*
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace.
+ */
+int drm_getclient(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_client *client = data;
+
+	/*
+	 * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
+	 * from breaking completely. Userspace tools stop enumerating once they
+	 * get -EINVAL, hence this is the return value we need to hand back for
+	 * no clients tracked.
+	 *
+	 * Unfortunately some clients (*cough* libva *cough*) use this in a fun
+	 * attempt to figure out whether they're authenticated or not. Since
+	 * that's the only thing they care about, give it to them directly
+	 * instead of walking one giant list.
+	 */
+	if (client->idx == 0) {
+		client->auth = file_priv->authenticated;
+		client->pid = task_pid_vnr(current);
+#ifdef __linux__
+		client->uid = overflowuid;
+#else
+		client->uid = 0;
+#endif
+		client->magic = 0;
+		client->iocs = 0;
+
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+/*
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+static int drm_getstats(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_stats *stats = data;
+
+	/* Clear stats to prevent userspace from eating its stack garbage. */
+	memset(stats, 0, sizeof(*stats));
+
+	return 0;
+}
+
+/*
+ * Get device/driver capabilities
+ */
+static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_get_cap *req = data;
+	struct drm_crtc *crtc;
+
+	req->value = 0;
+
+	/* Only some caps make sense with UMS/render-only drivers. */
+	switch (req->capability) {
+	case DRM_CAP_TIMESTAMP_MONOTONIC:
+		req->value = 1;
+		return 0;
+	case DRM_CAP_PRIME:
+		req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+		req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+		return 0;
+	case DRM_CAP_SYNCOBJ:
+		req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
+		return 0;
+	}
+
+	/* Other caps only work with KMS drivers */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EOPNOTSUPP;
+
+	switch (req->capability) {
+	case DRM_CAP_DUMB_BUFFER:
+		if (dev->driver->dumb_create)
+			req->value = 1;
+		break;
+	case DRM_CAP_VBLANK_HIGH_CRTC:
+		req->value = 1;
+		break;
+	case DRM_CAP_DUMB_PREFERRED_DEPTH:
+		req->value = dev->mode_config.preferred_depth;
+		break;
+	case DRM_CAP_DUMB_PREFER_SHADOW:
+		req->value = dev->mode_config.prefer_shadow;
+		break;
+	case DRM_CAP_ASYNC_PAGE_FLIP:
+		req->value = dev->mode_config.async_page_flip;
+		break;
+	case DRM_CAP_PAGE_FLIP_TARGET:
+		req->value = 1;
+		drm_for_each_crtc(crtc, dev) {
+			if (!crtc->funcs->page_flip_target)
+				req->value = 0;
+		}
+		break;
+	case DRM_CAP_CURSOR_WIDTH:
+		if (dev->mode_config.cursor_width)
+			req->value = dev->mode_config.cursor_width;
+		else
+			req->value = 64;
+		break;
+	case DRM_CAP_CURSOR_HEIGHT:
+		if (dev->mode_config.cursor_height)
+			req->value = dev->mode_config.cursor_height;
+		else
+			req->value = 64;
+		break;
+	case DRM_CAP_ADDFB2_MODIFIERS:
+		req->value = dev->mode_config.allow_fb_modifiers;
+		break;
+	case DRM_CAP_CRTC_IN_VBLANK_EVENT:
+		req->value = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
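For reference, this is how a client typically consumes drm_getcap(): a minimal userspace sketch using libdrm's drmGetCap() wrapper, assuming a libdrm development environment is available.

/* Hypothetical userspace sketch: querying a capability served by
 * drm_getcap() above, via libdrm's drmGetCap() wrapper. */
#include <stdint.h>

#include <xf86drm.h>

int supports_dumb_buffers(int fd)
{
	uint64_t value = 0;

	/* drmGetCap() issues DRM_IOCTL_GET_CAP; returns 0 on success. */
	if (drmGetCap(fd, DRM_CAP_DUMB_BUFFER, &value) != 0)
		return 0;

	return value == 1;
}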
+
+/*
+ * Set device/driver capabilities
+ */
+static int
+drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_set_client_cap *req = data;
+
+	/* No render-only settable capabilities for now */
+
+	/* The caps below only work with KMS drivers */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EOPNOTSUPP;
+
+	switch (req->capability) {
+	case DRM_CLIENT_CAP_STEREO_3D:
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->stereo_allowed = req->value;
+		break;
+	case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->universal_planes = req->value;
+		break;
+	case DRM_CLIENT_CAP_ATOMIC:
+		if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+			return -EOPNOTSUPP;
+		/* The modesetting DDX has a totally broken idea of atomic. */
+		if (current->comm[0] == 'X' && req->value == 1) {
+			pr_info("broken atomic modeset userspace detected, disabling atomic\n");
+			return -EOPNOTSUPP;
+		}
+		if (req->value > 2)
+			return -EINVAL;
+		file_priv->atomic = req->value;
+		file_priv->universal_planes = req->value;
+		/*
+		 * No atomic user-space blows up on aspect ratio mode bits.
+		 */
+		file_priv->aspect_ratio_allowed = req->value;
+		break;
+	case DRM_CLIENT_CAP_ASPECT_RATIO:
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->aspect_ratio_allowed = req->value;
+		break;
+	case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
+		if (!file_priv->atomic)
+			return -EINVAL;
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->writeback_connectors = req->value;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version
+ */
+static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_set_version *sv = data;
+	int if_version, retcode = 0;
+
+	mutex_lock(&dev->master_mutex);
+	if (sv->drm_di_major != -1) {
+		if (sv->drm_di_major != DRM_IF_MAJOR ||
+		    sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+			retcode = -EINVAL;
+			goto done;
+		}
+		if_version = DRM_IF_VERSION(sv->drm_di_major,
+					    sv->drm_di_minor);
+		dev->if_version = max(if_version, dev->if_version);
+		if (sv->drm_di_minor >= 1) {
+			/*
+			 * Version 1.1 includes tying of DRM to specific device
+			 * Version 1.4 has proper PCI domain support
+			 */
+			retcode = drm_set_busid(dev, file_priv);
+			if (retcode)
+				goto done;
+		}
+	}
+
+	if (sv->drm_dd_major != -1) {
+		if (sv->drm_dd_major != dev->driver->major ||
+		    sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+		    dev->driver->minor) {
+			retcode = -EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	sv->drm_di_major = DRM_IF_MAJOR;
+	sv->drm_di_minor = DRM_IF_MINOR;
+	sv->drm_dd_major = dev->driver->major;
+	sv->drm_dd_minor = dev->driver->minor;
+	mutex_unlock(&dev->master_mutex);
+
+	return retcode;
+}
+
+/**
+ * drm_noop - DRM no-op ioctl implementation
+ * @dev: DRM device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: DRM file for the ioctl call
+ *
+ * This no-op implementation for drm ioctls is useful for deprecated
+ * functionality where we can't return a failure code because existing userspace
+ * checks the result of the ioctl, but doesn't care about the action.
+ *
+ * Always returns successfully with 0.
+ */
+int drm_noop(struct drm_device *dev, void *data,
+	     struct drm_file *file_priv)
+{
+	DRM_DEBUG("\n");
+	return 0;
+}
+EXPORT_SYMBOL(drm_noop);
+
+/**
+ * drm_invalid_op - DRM invalid ioctl implementation
+ * @dev: DRM device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: DRM file for the ioctl call
+ *
+ * This no-op implementation for drm ioctls is useful for deprecated
+ * functionality where we really don't want to allow userspace to call the ioctl
+ * any more. This is the case for old ums interfaces for drivers that
+ * transitioned to kms gradually and so kept the old legacy tables around. This
+ * only applies to radeon and i915 kms drivers, other drivers shouldn't need to
+ * use this function.
+ *
+ * Always fails with a return value of -EINVAL.
+ */
+int drm_invalid_op(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(drm_invalid_op);
+
+/*
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
+{
+	int len;
+
+	/* don't overflow userbuf */
+	len = strlen(value);
+	if (len > *buf_len)
+		len = *buf_len;
+
+	/* let userspace know exact length of driver value (which could be
+	 * larger than the userspace-supplied buffer) */
+	*buf_len = strlen(value);
+
+	/* finally, try filling in the userbuf */
+	if (len && buf)
+		if (copy_to_user(buf, value, len))
+			return -EFAULT;
+	return 0;
+}
+
+/*
+ * Get version information
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+int drm_version(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_version *version = data;
+	int err;
+
+	version->version_major = dev->driver->major;
+	version->version_minor = dev->driver->minor;
+	version->version_patchlevel = dev->driver->patchlevel;
+	err = drm_copy_field(version->name, &version->name_len,
+			     dev->driver->name);
+	if (!err)
+		err = drm_copy_field(version->date, &version->date_len,
+				     dev->driver->date);
+	if (!err)
+		err = drm_copy_field(version->desc, &version->desc_len,
+				     dev->driver->desc);
+
+	return err;
+}
+
+/**
+ * drm_ioctl_permit - Check ioctl permissions against caller
+ *
+ * @flags: ioctl permission flags.
+ * @file_priv: Pointer to struct drm_file identifying the caller.
+ *
+ * Checks whether the caller is allowed to run an ioctl with the
+ * indicated permissions.
+ *
+ * Returns:
+ * Zero if allowed, -EACCES otherwise.
+ */ +int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) +{ + /* ROOT_ONLY is only for CAP_SYS_ADMIN */ + if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) + return -EACCES; + + /* AUTH is only for authenticated or render client */ + if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) && + !file_priv->authenticated)) + return -EACCES; + + /* MASTER is only for master or control clients */ + if (unlikely((flags & DRM_MASTER) && + !drm_is_current_master(file_priv))) + return -EACCES; + + /* Render clients must be explicitly allowed */ + if (unlikely(!(flags & DRM_RENDER_ALLOW) && + drm_is_render_client(file_priv))) + return -EACCES; + + return 0; +} +EXPORT_SYMBOL(drm_ioctl_permit); + +#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags) +#else +#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags) +#endif + +/* Ioctl table */ +static const struct drm_ioctl_desc drm_ioctls[] = { + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), + + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER), + + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), + + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), + + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + 
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), + + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + +#if IS_ENABLED(CONFIG_AGP) + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +#endif + + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0), + + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, 
DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED), +#ifdef __linux__ + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), +#endif +}; + +#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) + +/** + * DOC: driver specific ioctls + * + * First things first, driver private IOCTLs should only be needed for drivers + * supporting rendering. Kernel modesetting is all standardized, and extended + * through properties. There are a few exceptions in some existing drivers, + * which define IOCTL for use by the display DRM master, but they all predate + * properties. 
+ *
+ * Now if you do have a render driver you always have to support it through
+ * driver private IOCTLs. There are a few steps needed to wire all the things
+ * up.
+ *
+ * First you need to define the structure for your IOCTL in your driver private
+ * UAPI header in ``include/uapi/drm/my_driver_drm.h``::
+ *
+ *     struct my_driver_operation {
+ *             u32 some_thing;
+ *             u32 another_thing;
+ *     };
+ *
+ * Please make sure that you follow all the best practices from
+ * ``Documentation/ioctl/botching-up-ioctls.rst``. Note that drm_ioctl()
+ * automatically zero-extends structures, hence make sure you can add more stuff
+ * at the end, i.e. don't put a variable sized array there.
+ *
+ * Then you need to define your IOCTL number, using one of DRM_IO(), DRM_IOR(),
+ * DRM_IOW() or DRM_IOWR(). It must start with the DRM_IOCTL\_ prefix::
+ *
+ *     #define DRM_IOCTL_MY_DRIVER_OPERATION \
+ *         DRM_IOW(DRM_COMMAND_BASE, struct my_driver_operation)
+ *
+ * DRM driver private IOCTLs must be in the range from DRM_COMMAND_BASE to
+ * DRM_COMMAND_END. Finally you need an array of &struct drm_ioctl_desc to wire
+ * up the handlers and set the access rights::
+ *
+ *     static const struct drm_ioctl_desc my_driver_ioctls[] = {
+ *         DRM_IOCTL_DEF_DRV(MY_DRIVER_OPERATION, my_driver_operation,
+ *                 DRM_AUTH|DRM_RENDER_ALLOW),
+ *     };
+ *
+ * And then assign this to the &drm_driver.ioctls field in your driver
+ * structure.
+ *
+ * See the separate chapter on :ref:`file operations<drm_driver_fops>` for how
+ * the driver-specific IOCTLs are wired up.
+ */
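To round off the recipe in the DOC comment above, here is a hedged sketch of the handler that my_driver_ioctls[] would point at. MY_DRIVER_MAX_THING and do_hardware_thing() are invented placeholders, not part of this patch.

/* Hypothetical sketch: the handler wired up by my_driver_ioctls[] above.
 * MY_DRIVER_MAX_THING and do_hardware_thing() are invented placeholders. */
static int my_driver_operation(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct my_driver_operation *args = data;

	/* drm_ioctl() has already copied the user struct into *data (and
	 * zeroed any tail the caller didn't supply); it copies the result
	 * back out after this handler returns. */
	if (args->some_thing > MY_DRIVER_MAX_THING)
		return -EINVAL;

	args->another_thing = do_hardware_thing(dev, args->some_thing);
	return 0;
}

Note that the handler never touches userspace pointers itself; the in/out marshalling is done centrally by drm_ioctl() below, based on the _IOC_SIZE() of the command.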
+
+long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
+		      u32 flags)
+{
+	struct drm_file *file_priv = file->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	int retcode;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	retcode = drm_ioctl_permit(flags, file_priv);
+	if (unlikely(retcode))
+		return retcode;
+
+	/* Enforce sane locking for modern driver ioctls. */
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY) ||
+	    (flags & DRM_UNLOCKED))
+		retcode = func(dev, kdata, file_priv);
+	else {
+		mutex_lock(&drm_global_mutex);
+		retcode = func(dev, kdata, file_priv);
+		mutex_unlock(&drm_global_mutex);
+	}
+	return retcode;
+}
+EXPORT_SYMBOL(drm_ioctl_kernel);
+
+/**
+ * drm_ioctl - ioctl callback implementation for DRM drivers
+ * @filp: file this ioctl is called on
+ * @cmd: ioctl cmd number
+ * @arg: user argument
+ *
+ * Looks up the ioctl function in the DRM core and the driver dispatch table,
+ * stored in &drm_driver.ioctls. It checks for necessary permission by calling
+ * drm_ioctl_permit(), and dispatches to the respective function.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+long drm_ioctl(struct file *filp,
+	       unsigned int cmd, unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev;
+	const struct drm_ioctl_desc *ioctl = NULL;
+	drm_ioctl_t *func;
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	int retcode = -EINVAL;
+	char stack_kdata[128];
+	char *kdata = NULL;
+	unsigned int in_size, out_size, drv_size, ksize;
+	bool is_driver_ioctl;
+
+	dev = file_priv->minor->dev;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
+
+	if (is_driver_ioctl) {
+		/* driver ioctl */
+		unsigned int index = nr - DRM_COMMAND_BASE;
+
+		if (index >= dev->driver->num_ioctls)
+			goto err_i1;
+		index = array_index_nospec(index, dev->driver->num_ioctls);
+		ioctl = &dev->driver->ioctls[index];
+	} else {
+		/* core ioctl */
+		if (nr >= DRM_CORE_IOCTL_COUNT)
+			goto err_i1;
+		nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
+		ioctl = &drm_ioctls[nr];
+	}
+
+	drv_size = _IOC_SIZE(ioctl->cmd);
+	out_size = in_size = _IOC_SIZE(cmd);
+	if ((cmd & ioctl->cmd & IOC_IN) == 0)
+		in_size = 0;
+	if ((cmd & ioctl->cmd & IOC_OUT) == 0)
+		out_size = 0;
+	ksize = max(max(in_size, out_size), drv_size);
+
+#ifdef __linux__
+	DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+		  task_pid_nr(current),
+		  (long)old_encode_dev(file_priv->minor->kdev->devt),
+		  file_priv->authenticated, ioctl->name);
+#else
+	DRM_DEBUG("%s: pid=%d, auth=%d, %s\n", __func__, task_pid_nr(current),
+		  file_priv->authenticated, ioctl->name);
+#endif
+
+	/* Do not trust userspace, use our own definition */
+	func = ioctl->func;
+
+	if (unlikely(!func)) {
+		DRM_DEBUG("no function\n");
+		retcode = -EINVAL;
+		goto err_i1;
+	}
+
+	if (ksize <= sizeof(stack_kdata)) {
+		kdata = stack_kdata;
+	} else {
+		kdata = kmalloc(ksize, GFP_KERNEL);
+		if (!kdata) {
+			retcode = -ENOMEM;
+			goto err_i1;
+		}
+	}
+
+// Some weird stuff is happening in copy_from_user
+#ifdef __linux__
+	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+		retcode = -EFAULT;
+		goto err_i1;
+	}
+#else
+	memcpy(kdata, (void __user *)arg, in_size);
+#endif
+
+	if (ksize > in_size)
+		memset(kdata + in_size, 0, ksize - in_size);
+
+	retcode = drm_ioctl_kernel(filp, func, kdata, ioctl->flags);
+#ifdef __linux__
+	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+		retcode = -EFAULT;
+#else
+	memcpy((void __user *)arg, kdata, out_size);
+#endif
+
+      err_i1:
+	if (!ioctl)
+#ifdef __linux__
+		DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+			  task_pid_nr(current),
+			  (long)old_encode_dev(file_priv->minor->kdev->devt),
+			  file_priv->authenticated, cmd, nr);
+#endif
+
+	if (kdata != stack_kdata)
+		kfree(kdata);
+	if (retcode)
+		DRM_DEBUG("pid=%d, ret = %d\n", task_pid_nr(current), retcode);
+	return retcode;
+}
+EXPORT_SYMBOL(drm_ioctl);
+
+/**
+ * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ * @nr: ioctl number
+ * @flags: where to return the ioctl permission flags
+ *
+ * This ioctl is only used by the vmwgfx driver to augment the access checks
+ * done by the drm core and insofar a pretty decent layering violation. This
+ * shouldn't be used by any drivers.
+ *
+ * Returns:
+ * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
+ */ +bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) +{ + if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) + return false; + + if (nr >= DRM_CORE_IOCTL_COUNT) + return false; + nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT); + + *flags = drm_ioctls[nr].flags; + return true; +} +EXPORT_SYMBOL(drm_ioctl_flags); Index: sys/dev/drm/core/drm_irq.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_irq.c @@ -0,0 +1,262 @@ +/* + * drm_irq.c IRQ and vblank support + * + * \author Rickard E. (Rik) Faith + * \author Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + + +#include +#include /* For task queue support */ +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "drm_internal.h" + +/** + * DOC: irq helpers + * + * The DRM core provides very simple support helpers to enable IRQ handling on a + * device through the drm_irq_install() and drm_irq_uninstall() functions. 
This
+ * only supports devices with a single interrupt on the main device stored in
+ * &drm_device.dev and set as the device parameter in drm_dev_alloc().
+ *
+ * These IRQ helpers are strictly optional. Drivers which roll their own only
+ * need to set &drm_device.irq_enabled to signal the DRM core that vblank
+ * interrupts are working. Since these helpers don't automatically clean up the
+ * requested interrupt like e.g. devm_request_irq() they're not really
+ * recommended.
+ */
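As a hedged sketch of the "roll your own" alternative recommended above, assuming a platform device and an invented my_driver_irq_handler():

#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <drm/drm_drv.h>

/* Hypothetical sketch: driver-managed interrupt setup instead of
 * drm_irq_install(); my_driver_irq_handler() is an invented handler. */
static int my_driver_irq_init(struct drm_device *drm,
			      struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	/* devm_request_irq() releases the IRQ automatically on unbind,
	 * unlike the drm_irq_install() helper below. */
	ret = devm_request_irq(&pdev->dev, irq, my_driver_irq_handler, 0,
			       dev_name(&pdev->dev), drm);
	if (ret)
		return ret;

	/* Signal the DRM core that vblank interrupts are working. */
	drm->irq_enabled = true;
	return 0;
}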
+
+/**
+ * drm_irq_install - install IRQ handler
+ * @dev: DRM device
+ * @irq: IRQ number to install the handler for
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * &drm_driver.irq_preinstall and &drm_driver.irq_postinstall functions before
+ * and after the installation.
+ *
+ * This is the simplified helper interface provided for drivers with no special
+ * needs. Drivers which need to install interrupt handlers for multiple
+ * interrupts must instead set &drm_device.irq_enabled to signal the DRM core
+ * that vblank interrupts are available.
+ *
+ * @irq must match the interrupt number that would be passed to request_irq(),
+ * if called directly instead of using this helper function.
+ *
+ * &drm_driver.irq_handler is called to handle the registered interrupt.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+int drm_irq_install(struct drm_device *dev, int irq)
+{
+	int ret;
+	unsigned long sh_flags = 0;
+
+	if (irq == 0)
+		return -EINVAL;
+
+	/* Driver must have been initialized */
+	if (!dev->dev_private)
+		return -EINVAL;
+
+	if (dev->irq_enabled)
+		return -EBUSY;
+	dev->irq_enabled = true;
+
+	DRM_DEBUG("irq=%d\n", irq);
+
+	/* Before installing handler */
+	if (dev->driver->irq_preinstall)
+		dev->driver->irq_preinstall(dev);
+
+	/* PCI devices require shared interrupts. */
+	if (dev->pdev)
+		sh_flags = IRQF_SHARED;
+
+	ret = request_irq(irq, dev->driver->irq_handler,
+			  sh_flags, dev->driver->name, dev);
+
+	if (ret < 0) {
+		dev->irq_enabled = false;
+		return ret;
+	}
+
+	/* After installing handler */
+	if (dev->driver->irq_postinstall)
+		ret = dev->driver->irq_postinstall(dev);
+
+	if (ret < 0) {
+		dev->irq_enabled = false;
+		if (drm_core_check_feature(dev, DRIVER_LEGACY))
+			vga_client_register(dev->pdev, NULL, NULL, NULL);
+		free_irq(irq, dev);
+	} else {
+		dev->irq = irq;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_irq_install);
+
+/**
+ * drm_irq_uninstall - uninstall the IRQ handler
+ * @dev: DRM device
+ *
+ * Calls the driver's &drm_driver.irq_uninstall function and unregisters the IRQ
+ * handler. This should only be called by drivers which used drm_irq_install()
+ * to set up their interrupt handler. Other drivers must only reset
+ * &drm_device.irq_enabled to false.
+ *
+ * Note that for kernel modesetting drivers it is a bug if this function fails.
+ * The sanity checks are only to catch buggy user modesetting drivers which call
+ * the same function through an ioctl.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+int drm_irq_uninstall(struct drm_device *dev)
+{
+	unsigned long irqflags;
+	bool irq_enabled;
+	int i;
+
+	irq_enabled = dev->irq_enabled;
+	dev->irq_enabled = false;
+
+	/*
+	 * Wake up any waiters so they don't hang. This is just to paper over
+	 * issues for UMS drivers which aren't in full control of their
+	 * vblank/irq handling. KMS drivers must ensure that vblanks are all
+	 * disabled when uninstalling the irq handler.
+	 */
+	if (dev->num_crtcs) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		for (i = 0; i < dev->num_crtcs; i++) {
+			struct drm_vblank_crtc *vblank = &dev->vblank[i];
+
+			if (!vblank->enabled)
+				continue;
+
+			WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
+
+			drm_vblank_disable_and_save(dev, i);
+			wake_up(&vblank->queue);
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+	}
+
+	if (!irq_enabled)
+		return -EINVAL;
+
+	DRM_DEBUG("irq=%d\n", dev->irq);
+
+	if (drm_core_check_feature(dev, DRIVER_LEGACY))
+		vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+	if (dev->driver->irq_uninstall)
+		dev->driver->irq_uninstall(dev);
+
+	free_irq(dev->irq, dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_irq_uninstall);
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+int drm_legacy_irq_control(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_control *ctl = data;
+	int ret = 0, irq;
+
+	/* If we don't have an IRQ we fall back for compatibility reasons -
+	 * this used to be a separate function in drm_dma.h
+	 */
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return 0;
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
+		return 0;
+	/* UMS was only ever supported on pci devices. */
+	if (WARN_ON(!dev->pdev))
+		return -EINVAL;
+
+	switch (ctl->func) {
+	case DRM_INST_HANDLER:
+		irq = dev->pdev->irq;
+
+		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+		    ctl->irq != irq)
+			return -EINVAL;
+		mutex_lock(&dev->struct_mutex);
+		ret = drm_irq_install(dev, irq);
+		mutex_unlock(&dev->struct_mutex);
+
+		return ret;
+	case DRM_UNINST_HANDLER:
+		mutex_lock(&dev->struct_mutex);
+		ret = drm_irq_uninstall(dev);
+		mutex_unlock(&dev->struct_mutex);
+
+		return ret;
+	default:
+		return -EINVAL;
+	}
+}
+#endif
Index: sys/dev/drm/core/drm_kms_helper_common.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_kms_helper_common.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ * + * Authors: + * Rafael Antognolli + * + */ + +#include + +#include + +#include "drm_crtc_helper_internal.h" + +MODULE_AUTHOR("David Airlie, Jesse Barnes"); +MODULE_DESCRIPTION("DRM KMS helper"); +MODULE_LICENSE("GPL and additional rights"); + +#ifdef __linux__ // BSD doesn't have struct kernel_param and doesn't need backward compatibility +#if IS_ENABLED(CONFIG_DRM_LOAD_EDID_FIRMWARE) + +/* Backward compatibility for drm_kms_helper.edid_firmware */ +static int edid_firmware_set(const char *val, const struct kernel_param *kp) +{ + DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n"); + + return __drm_set_edid_firmware_path(val); +} + +static int edid_firmware_get(char *buffer, const struct kernel_param *kp) +{ + return __drm_get_edid_firmware_path(buffer, PAGE_SIZE); +} + +static const struct kernel_param_ops edid_firmware_ops = { + .set = edid_firmware_set, + .get = edid_firmware_get, +}; + +module_param_cb(edid_firmware, &edid_firmware_ops, NULL, 0644); +__MODULE_PARM_TYPE(edid_firmware, "charp"); +MODULE_PARM_DESC(edid_firmware, + "DEPRECATED. Use drm.edid_firmware module parameter instead."); + +#endif +#endif + +static int __init drm_kms_helper_init(void) +{ + int ret; + + /* Call init functions from specific kms helpers here */ + ret = drm_fb_helper_modinit(); + if (ret < 0) + goto out; + + ret = drm_dp_aux_dev_init(); + if (ret < 0) + goto out; + +out: + return ret; +} + +static void __exit drm_kms_helper_exit(void) +{ + /* Call exit functions from specific kms helpers here */ + drm_dp_aux_dev_exit(); +} + +module_init(drm_kms_helper_init); +module_exit(drm_kms_helper_exit); Index: sys/dev/drm/core/drm_legacy.h =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_legacy.h @@ -0,0 +1,214 @@ +#ifndef __DRM_LEGACY_H__ +#define __DRM_LEGACY_H__ + +/* + * Copyright (c) 2014 David Herrmann + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains legacy interfaces that modern drm drivers + * should no longer be using. They cannot be removed as legacy + * drivers use them, and removing them would be an API break.
+ */ +#include + +#include +#include +#include + +struct agp_memory; +struct drm_device; +struct drm_file; +struct drm_buf_desc; + +/* + * Generic DRM Contexts + */ + +#define DRM_KERNEL_CONTEXT 0 +#define DRM_RESERVED_CONTEXTS 1 + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +void drm_legacy_ctxbitmap_init(struct drm_device *dev); +void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); +void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file); +#else +static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {} +static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {} +static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {} +#endif + +void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle); + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f); + +int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f); +#endif + +/* + * Generic Buffer Management + */ + +#define DRM_MAP_HASH_OFFSET 0x10000000 + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +static inline int drm_legacy_create_map_hash(struct drm_device *dev) +{ + return drm_ht_create(&dev->map_hash, 12); +} + +static inline void drm_legacy_remove_map_hash(struct drm_device *dev) +{ + drm_ht_remove(&dev->map_hash); +} +#else +static inline int drm_legacy_create_map_hash(struct drm_device *dev) +{ + return 0; +} + +static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {} +#endif + + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f); + +int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f); +#endif + +int __drm_legacy_infobufs(struct drm_device *, void *, int *, + int (*)(void *, int, struct drm_buf_entry *)); +int __drm_legacy_mapbufs(struct drm_device *, void *, int *, + void __user **, + int (*)(void *, int, unsigned long, struct drm_buf *), + struct drm_file *); + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +void drm_legacy_master_rmmaps(struct drm_device *dev, + struct drm_master *master); +void drm_legacy_rmmaps(struct drm_device *dev); +#else +static inline void drm_legacy_master_rmmaps(struct drm_device *dev, + struct drm_master *master) {} +static inline void drm_legacy_rmmaps(struct drm_device *dev) {} +#endif + +#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY) +void drm_legacy_vma_flush(struct drm_device *d); +#else +static inline void drm_legacy_vma_flush(struct 
drm_device *d) +{ + /* do nothing */ +} +#endif + +/* + * AGP Support + */ + +struct drm_agp_mem { + unsigned long handle; + struct agp_memory *memory; + unsigned long bound; + int pages; + struct list_head head; +}; + +/* drm_lock.c */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) +int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); +void drm_legacy_lock_release(struct drm_device *dev, struct file *filp); +#else +static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {} +#endif + +/* DMA support */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) +int drm_legacy_dma_setup(struct drm_device *dev); +void drm_legacy_dma_takedown(struct drm_device *dev); +#else +static inline int drm_legacy_dma_setup(struct drm_device *dev) +{ + return 0; +} +#endif + +void drm_legacy_free_buffer(struct drm_device *dev, + struct drm_buf * buf); +#if IS_ENABLED(CONFIG_DRM_LEGACY) +void drm_legacy_reclaim_buffers(struct drm_device *dev, + struct drm_file *filp); +#else +static inline void drm_legacy_reclaim_buffers(struct drm_device *dev, + struct drm_file *filp) {} +#endif + +/* Scatter Gather Support */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) +void drm_legacy_sg_cleanup(struct drm_device *dev); +int drm_legacy_sg_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_legacy_sg_free(struct drm_device *dev, void *data, + struct drm_file *file_priv); +#endif + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +void drm_legacy_init_members(struct drm_device *dev); +void drm_legacy_destroy_members(struct drm_device *dev); +void drm_legacy_dev_reinit(struct drm_device *dev); +int drm_legacy_setup(struct drm_device * dev); +#else +static inline void drm_legacy_init_members(struct drm_device *dev) {} +static inline void drm_legacy_destroy_members(struct drm_device *dev) {} +static inline void drm_legacy_dev_reinit(struct drm_device *dev) {} +static inline int drm_legacy_setup(struct drm_device * dev) { return 0; } +#endif + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master); +#else +static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {} +#endif + +#if IS_ENABLED(CONFIG_DRM_LEGACY) +void drm_master_legacy_init(struct drm_master *master); +#else +static inline void drm_master_legacy_init(struct drm_master *master) {} +#endif + +#endif /* __DRM_LEGACY_H__ */ Index: sys/dev/drm/core/drm_memory.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_memory.c @@ -0,0 +1,196 @@ +/** + * \file drm_memory.c + * Memory management wrappers for DRM + * + * \author Rickard E. (Rik) Faith + * \author Gareth Hughes + */ + +/* + * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#ifdef __linux__ +#include +#endif + +#include +#include + +#include "drm_legacy.h" + +#if IS_ENABLED(CONFIG_AGP) + +#ifdef HAVE_PAGE_AGP +# include +#else +# ifdef __powerpc__ +# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL) +# else +# define PAGE_AGP PAGE_KERNEL +# endif +#endif + +static void *agp_remap(unsigned long offset, unsigned long size, + struct drm_device *dev) +{ + unsigned long i, num_pages = + PAGE_ALIGN(size) / PAGE_SIZE; + struct drm_agp_mem *agpmem; + struct page **page_map; + struct page **phys_page_map; + void *addr; + + size = PAGE_ALIGN(size); + +#ifdef __alpha__ + offset -= dev->hose->mem_space->start; +#endif + + list_for_each_entry(agpmem, &dev->agp->memory, head) + if (agpmem->bound <= offset + && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= + (offset + size)) + break; + if (&agpmem->head == &dev->agp->memory) + return NULL; + + /* + * OK, we're mapping AGP space on a chipset/platform on which memory accesses by + * the CPU do not get remapped by the GART. We fix this by using the kernel's + * page-table instead (that's probably faster anyhow...). + */ + /* note: use vmalloc() because num_pages could be large... 
*/ + page_map = vmalloc(array_size(num_pages, sizeof(struct page *))); + if (!page_map) + return NULL; + + phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); + for (i = 0; i < num_pages; ++i) + page_map[i] = phys_page_map[i]; + addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); + vfree(page_map); + + return addr; +} + +/** Wrapper around agp_free_memory() */ +void drm_free_agp(struct agp_memory *handle, int pages) +{ + agp_free_memory(handle); +} + +/** Wrapper around agp_bind_memory() */ +int drm_bind_agp(struct agp_memory *handle, unsigned int start) +{ + return agp_bind_memory(handle, start); +} + +/** Wrapper around agp_unbind_memory() */ +int drm_unbind_agp(struct agp_memory *handle) +{ + return agp_unbind_memory(handle); +} + +#else /* CONFIG_AGP */ +#ifdef __linux__ +static inline void *agp_remap(unsigned long offset, unsigned long size, + struct drm_device *dev) +{ + return NULL; +} +#endif /* __linux__ */ +#endif /* CONFIG_AGP */ + +#ifdef __linux__ +void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev) +{ + if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) + map->handle = agp_remap(map->offset, map->size, dev); + else + map->handle = ioremap(map->offset, map->size); +} +EXPORT_SYMBOL(drm_legacy_ioremap); + +void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) +{ + if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) + map->handle = agp_remap(map->offset, map->size, dev); + else + map->handle = ioremap_wc(map->offset, map->size); +} +EXPORT_SYMBOL(drm_legacy_ioremap_wc); + +void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) +{ + if (!map->handle || !map->size) + return; + + if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) + vunmap(map->handle); + else + iounmap(map->handle); +} +EXPORT_SYMBOL(drm_legacy_ioremapfree); +#endif /* __linux__ */ + +#ifdef __linux__ +bool drm_need_swiotlb(int dma_bits) +{ + struct resource *tmp; + resource_size_t max_iomem = 0; + + /* + * Xen paravirtual hosts require swiotlb regardless of requested dma + * transfer size. + * + * NOTE: Really, what it requires is use of the dma_alloc_coherent + * allocator used in ttm_dma_populate() instead of + * ttm_populate_and_map_pages(), which bounce buffers so much in + * Xen it leads to swiotlb buffer exhaustion. + */ + if (xen_pv_domain()) + return true; + + /* + * Enforce dma_alloc_coherent when memory encryption is active as well + * for the same reasons as for Xen paravirtual hosts. + */ + if (mem_encrypt_active()) + return true; + + for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) { + max_iomem = max(max_iomem, tmp->end); + } + + return max_iomem > ((u64)1 << dma_bits); +} +EXPORT_SYMBOL(drm_need_swiotlb); +#endif /* __linux__ */ Index: sys/dev/drm/core/drm_mm.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_mm.c @@ -0,0 +1,998 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. + * Copyright 2016 Intel Corporation + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ + +/* + * Generic simple memory manager implementation. Intended to be used as a base + * class implementation for more advanced memory managers. + * + * Note that the algorithm used is quite simple and there might be substantial + * performance gains if a smarter free list is implemented. Currently it is + * just an unordered stack of free regions. This could easily be improved if + * an RB-tree is used instead. At least if we expect heavy fragmentation. + * + * Aligned allocations can also see improvement. + * + * Authors: + * Thomas Hellström + */ + +#include +#include +#include +#include +#include + +#include + +/** + * DOC: Overview + * + * drm_mm provides a simple range allocator. Drivers are free to use the + * resource allocator from the Linux core if it suits them; the upside of drm_mm + * is that it's in the DRM core, which means it's easier to extend for some of + * the crazier special-purpose needs of GPUs. + * + * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node. + * Drivers are free to embed either of them into their own suitable + * datastructures. drm_mm itself will not do any memory allocations of its own, + * so if drivers choose not to embed nodes they need to still allocate them + * themselves. + * + * The range allocator also supports reservation of preallocated blocks. This is + * useful for taking over initial mode setting configurations from the firmware, + * where an object needs to be created which exactly matches the firmware's + * scanout target. As long as the range is still free it can be inserted anytime + * after the allocator is initialized, which helps with avoiding looped + * dependencies in the driver load sequence. + * + * drm_mm maintains a stack of most recently freed holes, which of all + * simplistic datastructures seems to be a fairly decent approach to clustering + * allocations and avoiding too much fragmentation. This means free space + * searches are O(num_holes). Given all the fancy features drm_mm supports, + * something better would be fairly complex, and since gfx thrashing is a fairly + * steep cliff it is not a real concern. Removing a node again is O(1). + * + * drm_mm supports a few features: Alignment and range restrictions can be + * supplied.
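+ * For example, a 4KiB-aligned allocation restricted to the first 256MiB of the
+ * managed range could be requested with (sketch only, error handling elided):
+ *
+ *	err = drm_mm_insert_node_in_range(&mm, &node, size, 4096, 0,
+ *					  0, 256 << 20, DRM_MM_INSERT_BEST);
+ *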
Furthermore every &drm_mm_node has a color value (which is just an + * opaque unsigned long) which in conjunction with a driver callback can be used + * to implement sophisticated placement restrictions. The i915 DRM driver uses + * this to implement guard pages between incompatible caching domains in the + * graphics TT. + * + * Two behaviors are supported for searching and allocating: bottom-up and + * top-down. The default is bottom-up. Top-down allocation can be used if the + * memory area has different restrictions, or just to reduce fragmentation. + * + * Finally, iteration helpers to walk all nodes and all holes are provided, as + * are some basic allocator dumpers for debugging. + * + * Note that this range allocator is not thread-safe; drivers need to protect + * modifications with their own locking. The idea behind this is that for a full + * memory manager additional data needs to be protected anyway, hence internal + * locking would be fully redundant. + */ + +#ifdef CONFIG_DRM_DEBUG_MM +#include + +#define STACKDEPTH 32 +#define BUFSZ 4096 + +static noinline void save_stack(struct drm_mm_node *node) +{ + unsigned long entries[STACKDEPTH]; + unsigned int n; + + n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); + + /* May be called under spinlock, so avoid sleeping */ + node->stack = stack_depot_save(entries, n, GFP_NOWAIT); +} + +static void show_leaks(struct drm_mm *mm) +{ + struct drm_mm_node *node; + unsigned long *entries; + unsigned int nr_entries; + char *buf; + + buf = kmalloc(BUFSZ, GFP_KERNEL); + if (!buf) + return; + + list_for_each_entry(node, drm_mm_nodes(mm), node_list) { + if (!node->stack) { + DRM_ERROR("node [%08llx + %08llx]: unknown owner\n", + node->start, node->size); + continue; + } + + nr_entries = stack_depot_fetch(node->stack, &entries); + stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); + DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", + node->start, node->size, buf); + } + + kfree(buf); +} + +#undef STACKDEPTH +#undef BUFSZ +#else +static void save_stack(struct drm_mm_node *node) { } +static void show_leaks(struct drm_mm *mm) { } +#endif + +#define START(node) ((node)->start) +#define LAST(node) ((node)->start + (node)->size - 1) + +#ifdef __linux__ +INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, + u64, __subtree_last, + START, LAST, static inline, drm_mm_interval_tree) + +struct drm_mm_node * +__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) +{ + return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree, + start, last) ?: (struct drm_mm_node *)&mm->head_node; +} +EXPORT_SYMBOL(__drm_mm_interval_first); +#endif + +#ifdef __linux__ +static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, + struct drm_mm_node *node) +{ + struct drm_mm *mm = hole_node->mm; + struct rb_node **link, *rb; + struct drm_mm_node *parent; + bool leftmost; + + node->__subtree_last = LAST(node); + + if (hole_node->allocated) { + rb = &hole_node->rb; + while (rb) { + parent = rb_entry(rb, struct drm_mm_node, rb); + if (parent->__subtree_last >= node->__subtree_last) + break; + + parent->__subtree_last = node->__subtree_last; + rb = rb_parent(rb); + } + + rb = &hole_node->rb; + link = &hole_node->rb.rb_right; + leftmost = false; + } else { + rb = NULL; + link = &mm->interval_tree.rb_root.rb_node; + leftmost = true; + } + + while (*link) { + rb = *link; + parent = rb_entry(rb, struct drm_mm_node, rb); + if (parent->__subtree_last < node->__subtree_last) + parent->__subtree_last = node->__subtree_last; + if
(node->start < parent->start) { + link = &parent->rb.rb_left; + } else { + link = &parent->rb.rb_right; + leftmost = false; + } + } + + rb_link_node(&node->rb, rb, link); + rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, + &drm_mm_interval_tree_augment); +} +#endif /* __linux__ */ + +#ifdef __FreeBSD__ +#undef RB_INSERT +#endif +#define RB_INSERT(root, member, expr) do { \ + struct rb_node **link = &root.rb_node, *rb = NULL; \ + u64 x = expr(node); \ + while (*link) { \ + rb = *link; \ + if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \ + link = &rb->rb_left; \ + else \ + link = &rb->rb_right; \ + } \ + rb_link_node(&node->member, rb, link); \ + rb_insert_color(&node->member, &root); \ +} while (0) + +#define HOLE_SIZE(NODE) ((NODE)->hole_size) +#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE)) + +static u64 rb_to_hole_size(struct rb_node *rb) +{ + return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; +} + +static void insert_hole_size(struct rb_root_cached *root, + struct drm_mm_node *node) +{ + struct rb_node **link = &root->rb_root.rb_node, *rb = NULL; + u64 x = node->hole_size; + bool first = true; + + while (*link) { + rb = *link; + if (x > rb_to_hole_size(rb)) { + link = &rb->rb_left; + } else { + link = &rb->rb_right; + first = false; + } + } + + rb_link_node(&node->rb_hole_size, rb, link); + rb_insert_color_cached(&node->rb_hole_size, root, first); +} + +static void add_hole(struct drm_mm_node *node) +{ + struct drm_mm *mm = node->mm; + + node->hole_size = + __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node); + DRM_MM_BUG_ON(!drm_mm_hole_follows(node)); + + insert_hole_size(&mm->holes_size, node); + RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR); + + list_add(&node->hole_stack, &mm->hole_stack); +} + +static void rm_hole(struct drm_mm_node *node) +{ + DRM_MM_BUG_ON(!drm_mm_hole_follows(node)); + + list_del(&node->hole_stack); + rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size); + rb_erase(&node->rb_hole_addr, &node->mm->holes_addr); + node->hole_size = 0; + + DRM_MM_BUG_ON(drm_mm_hole_follows(node)); +} + +static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb) +{ + return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size); +} + +static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb) +{ + return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr); +} + +#ifdef __linux__ +static inline u64 rb_hole_size(struct rb_node *rb) +{ + return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; +} +#endif + +static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size) +{ + struct rb_node *rb = mm->holes_size.rb_root.rb_node; + struct drm_mm_node *best = NULL; + + do { + struct drm_mm_node *node = + rb_entry(rb, struct drm_mm_node, rb_hole_size); + + if (size <= node->hole_size) { + best = node; + rb = rb->rb_right; + } else { + rb = rb->rb_left; + } + } while (rb); + + return best; +} + +static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr) +{ + struct rb_node *rb = mm->holes_addr.rb_node; + struct drm_mm_node *node = NULL; + + while (rb) { + u64 hole_start; + + node = rb_hole_addr_to_node(rb); + hole_start = __drm_mm_hole_node_start(node); + + if (addr < hole_start) + rb = node->rb_hole_addr.rb_left; + else if (addr > hole_start + node->hole_size) + rb = node->rb_hole_addr.rb_right; + else + break; + } + + return node; +} + +static struct drm_mm_node * +first_hole(struct drm_mm *mm, + u64 start, u64 end, u64 size, + enum drm_mm_insert_mode mode) +{ + switch 
(mode) { + default: + case DRM_MM_INSERT_BEST: + return best_hole(mm, size); + + case DRM_MM_INSERT_LOW: + return find_hole(mm, start); + + case DRM_MM_INSERT_HIGH: + return find_hole(mm, end); + + case DRM_MM_INSERT_EVICT: + return list_first_entry_or_null(&mm->hole_stack, + struct drm_mm_node, + hole_stack); + } +} + +static struct drm_mm_node * +next_hole(struct drm_mm *mm, + struct drm_mm_node *node, + enum drm_mm_insert_mode mode) +{ + switch (mode) { + default: + case DRM_MM_INSERT_BEST: + return rb_hole_size_to_node(rb_prev(&node->rb_hole_size)); + + case DRM_MM_INSERT_LOW: + return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr)); + + case DRM_MM_INSERT_HIGH: + return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr)); + + case DRM_MM_INSERT_EVICT: + node = list_next_entry(node, hole_stack); + return &node->hole_stack == &mm->hole_stack ? NULL : node; + } +} + +/** + * drm_mm_reserve_node - insert a pre-initialized node + * @mm: drm_mm allocator to insert @node into + * @node: drm_mm_node to insert + * + * This function inserts an already set-up &drm_mm_node into the allocator, + * meaning that start, size and color must be set by the caller. All other + * fields must be cleared to 0. This is useful to initialize the allocator with + * preallocated objects which must be set up before the range allocator can be + * set up, e.g. when taking over a firmware framebuffer. + * + * Returns: + * 0 on success, -ENOSPC if there's no hole where @node is. + */ +int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) +{ + u64 end = node->start + node->size; + struct drm_mm_node *hole; + u64 hole_start, hole_end; + u64 adj_start, adj_end; + + if (unlikely(end <= node->start)) + return -ENOSPC; + + /* Find the relevant hole to add our node to */ + hole = find_hole(mm, node->start); + if (!hole) + return -ENOSPC; + + adj_start = hole_start = __drm_mm_hole_node_start(hole); + adj_end = hole_end = hole_start + hole->hole_size; + + if (mm->color_adjust) + mm->color_adjust(hole, node->color, &adj_start, &adj_end); + + if (adj_start > node->start || adj_end < end) + return -ENOSPC; + + node->mm = mm; + + list_add(&node->node_list, &hole->node_list); +#ifdef __linux__ + drm_mm_interval_tree_add_node(hole, node); +#endif + node->allocated = true; + node->hole_size = 0; + + rm_hole(hole); + if (node->start > hole_start) + add_hole(hole); + if (end < hole_end) + add_hole(node); + + save_stack(node); + return 0; +} +EXPORT_SYMBOL(drm_mm_reserve_node); + +static u64 rb_to_hole_size_or_zero(struct rb_node *rb) +{ + return rb ? rb_to_hole_size(rb) : 0; +} + +/** + * drm_mm_insert_node_in_range - ranged search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocated node to insert + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for this node + * @range_start: start of the allowed range for this node + * @range_end: end of the allowed range for this node + * @mode: fine-tune the allocation search and placement + * + * The preallocated @node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole.
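+ *
+ * A minimal usage sketch (allocator, sizes and error handling are hypothetical
+ * and trimmed):
+ *
+ *	struct drm_mm_node node;
+ *
+ *	memset(&node, 0, sizeof(node));	// node must be cleared to 0
+ *	err = drm_mm_insert_node_in_range(&mgr->mm, &node, size, 0, 0,
+ *					  0, U64_MAX, DRM_MM_INSERT_BEST);
+ *	if (err == -ENOSPC)
+ *		// evict something and retry, see the lru scan roster below
+ *	// on success, node.start is the allocated offset;
+ *	// release with drm_mm_remove_node(&node) when done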
+ */ +int drm_mm_insert_node_in_range(struct drm_mm * const mm, + struct drm_mm_node * const node, + u64 size, u64 alignment, + unsigned long color, + u64 range_start, u64 range_end, + enum drm_mm_insert_mode mode) +{ + struct drm_mm_node *hole; + u64 remainder_mask; + bool once; + + DRM_MM_BUG_ON(range_start > range_end); + + if (unlikely(size == 0 || range_end - range_start < size)) + return -ENOSPC; + + if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size) + return -ENOSPC; + + if (alignment <= 1) + alignment = 0; + + once = mode & DRM_MM_INSERT_ONCE; + mode &= ~DRM_MM_INSERT_ONCE; + + remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; + for (hole = first_hole(mm, range_start, range_end, size, mode); + hole; + hole = once ? NULL : next_hole(mm, hole, mode)) { + u64 hole_start = __drm_mm_hole_node_start(hole); + u64 hole_end = hole_start + hole->hole_size; + u64 adj_start, adj_end; + u64 col_start, col_end; + + if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end) + break; + + if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start) + break; + + col_start = hole_start; + col_end = hole_end; + if (mm->color_adjust) + mm->color_adjust(hole, color, &col_start, &col_end); + + adj_start = max(col_start, range_start); + adj_end = min(col_end, range_end); + + if (adj_end <= adj_start || adj_end - adj_start < size) + continue; + + if (mode == DRM_MM_INSERT_HIGH) + adj_start = adj_end - size; + + if (alignment) { + u64 rem; + + if (likely(remainder_mask)) + rem = adj_start & remainder_mask; + else + div64_u64_rem(adj_start, alignment, &rem); + if (rem) { + adj_start -= rem; + if (mode != DRM_MM_INSERT_HIGH) + adj_start += alignment; + + if (adj_start < max(col_start, range_start) || + min(col_end, range_end) - adj_start < size) + continue; + + if (adj_end <= adj_start || + adj_end - adj_start < size) + continue; + } + } + + node->mm = mm; + node->size = size; + node->start = adj_start; + node->color = color; + node->hole_size = 0; + + list_add(&node->node_list, &hole->node_list); +#ifdef __linux__ + drm_mm_interval_tree_add_node(hole, node); +#endif + node->allocated = true; + + rm_hole(hole); + if (adj_start > hole_start) + add_hole(hole); + if (adj_start + size < hole_end) + add_hole(node); + + save_stack(node); + return 0; + } + + return -ENOSPC; +} +EXPORT_SYMBOL(drm_mm_insert_node_in_range); + +/** + * drm_mm_remove_node - Remove a memory node from the allocator. + * @node: drm_mm_node to remove + * + * This just removes a node from its drm_mm allocator. The node does not need to + * be cleared again before it can be re-inserted into this or any other drm_mm + * allocator. It is a bug to call this function on an unallocated node.
+ */ +void drm_mm_remove_node(struct drm_mm_node *node) +{ +#ifdef __linux__ + struct drm_mm *mm = node->mm; +#endif + struct drm_mm_node *prev_node; + + DRM_MM_BUG_ON(!node->allocated); + DRM_MM_BUG_ON(node->scanned_block); + + prev_node = list_prev_entry(node, node_list); + + if (drm_mm_hole_follows(node)) + rm_hole(node); + +#ifdef __linux__ + drm_mm_interval_tree_remove(node, &mm->interval_tree); +#endif + list_del(&node->node_list); + node->allocated = false; + + if (drm_mm_hole_follows(prev_node)) + rm_hole(prev_node); + add_hole(prev_node); +} +EXPORT_SYMBOL(drm_mm_remove_node); + +/** + * drm_mm_replace_node - move an allocation from @old to @new + * @old: drm_mm_node to remove from the allocator + * @new: drm_mm_node which should inherit @old's allocation + * + * This is useful for when drivers embed the drm_mm_node structure and hence + * can't move allocations by reassigning pointers. It's a combination of remove + * and insert with the guarantee that the allocation start will match. + */ +void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) +{ + struct drm_mm *mm = old->mm; + + DRM_MM_BUG_ON(!old->allocated); + + *new = *old; + + list_replace(&old->node_list, &new->node_list); + rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree); + + if (drm_mm_hole_follows(old)) { + list_replace(&old->hole_stack, &new->hole_stack); + rb_replace_node_cached(&old->rb_hole_size, + &new->rb_hole_size, + &mm->holes_size); + rb_replace_node(&old->rb_hole_addr, + &new->rb_hole_addr, + &mm->holes_addr); + } + + old->allocated = false; + new->allocated = true; +} +EXPORT_SYMBOL(drm_mm_replace_node); + +/** + * DOC: lru scan roster + * + * Very often GPUs need to have contiguous allocations for a given object. When + * evicting objects to make space for a new one it is therefore not very + * efficient to simply select objects from the tail of an LRU + * until there's a suitable hole: Especially for big objects or nodes that + * otherwise have special allocation constraints there's a good chance we evict + * lots of (smaller) objects unnecessarily. + * + * The DRM range allocator supports this use-case through the scanning + * interfaces. First a scan operation needs to be initialized with + * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds + * objects to the roster, probably by walking an LRU list, but this can be + * freely implemented. Eviction candidates are added using + * drm_mm_scan_add_block() until a suitable hole is found or there are no + * further evictable objects. Eviction roster metadata is tracked in &struct + * drm_mm_scan. + * + * The driver must walk through all objects again in exactly the reverse + * order to restore the allocator state. Note that while the allocator is used + * in the scan mode no other operation is allowed. + * + * Finally, the driver evicts all objects selected (drm_mm_scan_remove_block() + * reported true) in the scan, and any overlapping nodes after color adjustment + * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and + * since freeing a node is also O(1) the overall complexity is + * O(scanned_objects). So, like the free stack which needs to be walked before a + * scan operation even begins, this is linear in the number of objects. It + * doesn't seem to hurt too badly.
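+ *
+ * A condensed sketch of that flow, assuming driver objects which embed a
+ * &drm_mm_node and sit on an LRU list (all mydrv_* names hypothetical):
+ *
+ *	struct drm_mm_scan scan;
+ *	LIST_HEAD(roster);
+ *
+ *	drm_mm_scan_init_with_range(&scan, &mgr->mm, size, 0, 0,
+ *				    0, U64_MAX, DRM_MM_INSERT_BEST);
+ *	list_for_each_entry(obj, &mgr->lru, lru_link) {
+ *		list_add(&obj->scan_link, &roster);
+ *		if (drm_mm_scan_add_block(&scan, &obj->node))
+ *			break;	// suitable hole found
+ *	}
+ *
+ *	// list_add() prepends, so walking the roster forwards visits the
+ *	// blocks in exactly the reverse order of addition, as required
+ *	list_for_each_entry_safe(obj, tmp, &roster, scan_link)
+ *		if (drm_mm_scan_remove_block(&scan, &obj->node))
+ *			mydrv_evict(obj);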
+ */ + +/** + * drm_mm_scan_init_with_range - initialize range-restricted lru scanning + * @scan: scan state + * @mm: drm_mm to scan + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for the allocation + * @start: start of the allowed range for the allocation + * @end: end of the allowed range for the allocation + * @mode: fine-tune the allocation search and placement + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. + * + * Warning: + * As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. + */ +void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, + u64 alignment, + unsigned long color, + u64 start, + u64 end, + enum drm_mm_insert_mode mode) +{ + DRM_MM_BUG_ON(start >= end); + DRM_MM_BUG_ON(!size || size > end - start); + DRM_MM_BUG_ON(mm->scan_active); + + scan->mm = mm; + + if (alignment <= 1) + alignment = 0; + + scan->color = color; + scan->alignment = alignment; + scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; + scan->size = size; + scan->mode = mode; + + DRM_MM_BUG_ON(end <= start); + scan->range_start = start; + scan->range_end = end; + + scan->hit_start = U64_MAX; + scan->hit_end = 0; +} +EXPORT_SYMBOL(drm_mm_scan_init_with_range); + +/** + * drm_mm_scan_add_block - add a node to the scan list + * @scan: the active drm_mm scanner + * @node: drm_mm_node to add + * + * Add a node to the scan list that might be freed to make space for the desired + * hole. + * + * Returns: + * True if a hole has been found, false otherwise. + */ +bool drm_mm_scan_add_block(struct drm_mm_scan *scan, + struct drm_mm_node *node) +{ + struct drm_mm *mm = scan->mm; + struct drm_mm_node *hole; + u64 hole_start, hole_end; + u64 col_start, col_end; + u64 adj_start, adj_end; + + DRM_MM_BUG_ON(node->mm != mm); + DRM_MM_BUG_ON(!node->allocated); + DRM_MM_BUG_ON(node->scanned_block); + node->scanned_block = true; + mm->scan_active++; + + /* Remove this block from the node_list so that we enlarge the hole + * (distance between the end of our previous node and the start of + * our next), without poisoning the link so that we can restore it + * later in drm_mm_scan_remove_block().
+ */ + hole = list_prev_entry(node, node_list); + DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node); + __list_del_entry(&node->node_list); + + hole_start = __drm_mm_hole_node_start(hole); + hole_end = __drm_mm_hole_node_end(hole); + + col_start = hole_start; + col_end = hole_end; + if (mm->color_adjust) + mm->color_adjust(hole, scan->color, &col_start, &col_end); + + adj_start = max(col_start, scan->range_start); + adj_end = min(col_end, scan->range_end); + if (adj_end <= adj_start || adj_end - adj_start < scan->size) + return false; + + if (scan->mode == DRM_MM_INSERT_HIGH) + adj_start = adj_end - scan->size; + + if (scan->alignment) { + u64 rem; + + if (likely(scan->remainder_mask)) + rem = adj_start & scan->remainder_mask; + else + div64_u64_rem(adj_start, scan->alignment, &rem); + if (rem) { + adj_start -= rem; + if (scan->mode != DRM_MM_INSERT_HIGH) + adj_start += scan->alignment; + if (adj_start < max(col_start, scan->range_start) || + min(col_end, scan->range_end) - adj_start < scan->size) + return false; + + if (adj_end <= adj_start || + adj_end - adj_start < scan->size) + return false; + } + } + + scan->hit_start = adj_start; + scan->hit_end = adj_start + scan->size; + + DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end); + DRM_MM_BUG_ON(scan->hit_start < hole_start); + DRM_MM_BUG_ON(scan->hit_end > hole_end); + + return true; +} +EXPORT_SYMBOL(drm_mm_scan_add_block); + +/** + * drm_mm_scan_remove_block - remove a node from the scan list + * @scan: the active drm_mm scanner + * @node: drm_mm_node to remove + * + * Nodes **must** be removed in exactly the reverse order from the scan list as + * they have been added (e.g. using list_add() as they are added and then + * list_for_each() over that eviction list to remove), otherwise the internal + * state of the memory manager will be corrupted. + * + * When the scan list is empty, the selected memory nodes can be freed. An + * immediately following drm_mm_insert_node_in_range_generic() or one of the + * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return + * the just freed block (because it's at the top of the free_stack list). + * + * Returns: + * True if this block should be evicted, false otherwise. Will always + * return false when no hole has been found. + */ +bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, + struct drm_mm_node *node) +{ + struct drm_mm_node *prev_node; + + DRM_MM_BUG_ON(node->mm != scan->mm); + DRM_MM_BUG_ON(!node->scanned_block); + node->scanned_block = false; + + DRM_MM_BUG_ON(!node->mm->scan_active); + node->mm->scan_active--; + + /* During drm_mm_scan_add_block() we decoupled this node leaving + * its pointers intact. Now that the caller is walking back along + * the eviction list we can restore this block into its rightful + * place on the full node_list. To confirm that the caller is walking + * backwards correctly we check that prev_node->next == node->next, + * i.e. both believe the same node should be on the other side of the + * hole. 
+ */ + prev_node = list_prev_entry(node, node_list); + DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) != + list_next_entry(node, node_list)); + list_add(&node->node_list, &prev_node->node_list); + + return (node->start + node->size > scan->hit_start && + node->start < scan->hit_end); +} +EXPORT_SYMBOL(drm_mm_scan_remove_block); + +/** + * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole + * @scan: drm_mm scan with target hole + * + * After completing an eviction scan and removing the selected nodes, we may + * need to remove a few more nodes from either side of the target hole if + * mm.color_adjust is being used. + * + * Returns: + * A node to evict, or NULL if there are no overlapping nodes. + */ +struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) +{ + struct drm_mm *mm = scan->mm; + struct drm_mm_node *hole; + u64 hole_start, hole_end; + + DRM_MM_BUG_ON(list_empty(&mm->hole_stack)); + + if (!mm->color_adjust) + return NULL; + + /* + * The hole found during scanning should ideally be the first element + * in the hole_stack list, but due to side-effects in the driver it + * may not be. + */ + list_for_each_entry(hole, &mm->hole_stack, hole_stack) { + hole_start = __drm_mm_hole_node_start(hole); + hole_end = hole_start + hole->hole_size; + + if (hole_start <= scan->hit_start && + hole_end >= scan->hit_end) + break; + } + + /* We should only be called after we found the hole previously */ + DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); + if (unlikely(&hole->hole_stack == &mm->hole_stack)) + return NULL; + + DRM_MM_BUG_ON(hole_start > scan->hit_start); + DRM_MM_BUG_ON(hole_end < scan->hit_end); + + mm->color_adjust(hole, scan->color, &hole_start, &hole_end); + if (hole_start > scan->hit_start) + return hole; + if (hole_end < scan->hit_end) + return list_next_entry(hole, node_list); + + return NULL; +} +EXPORT_SYMBOL(drm_mm_scan_color_evict); + +/** + * drm_mm_init - initialize a drm-mm allocator + * @mm: the drm_mm structure to initialize + * @start: start of the range managed by @mm + * @size: size of the range managed by @mm + * + * Note that @mm must be cleared to 0 before calling this function. + */ +void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) +{ + DRM_MM_BUG_ON(start + size <= start); + + mm->color_adjust = NULL; + + INIT_LIST_HEAD(&mm->hole_stack); + mm->interval_tree = RB_ROOT_CACHED; + mm->holes_size = RB_ROOT_CACHED; + mm->holes_addr = RB_ROOT; + + /* Clever trick to avoid a special case in the free hole tracking. */ + INIT_LIST_HEAD(&mm->head_node.node_list); + mm->head_node.allocated = false; + mm->head_node.mm = mm; + mm->head_node.start = start + size; + mm->head_node.size = -size; + add_hole(&mm->head_node); + + mm->scan_active = 0; +} +EXPORT_SYMBOL(drm_mm_init); + +/** + * drm_mm_takedown - clean up a drm_mm allocator + * @mm: drm_mm allocator to clean up + * + * Note that it is a bug to call this function on an allocator which is not + * clean.
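+ *
+ * For reference, the expected lifecycle is roughly (sketch only):
+ *
+ *	struct drm_mm mm;
+ *
+ *	memset(&mm, 0, sizeof(mm));	// @mm must be cleared to 0
+ *	drm_mm_init(&mm, 0, size);
+ *	// ... drm_mm_insert_node_in_range() / drm_mm_remove_node() pairs ...
+ *	drm_mm_takedown(&mm);		// every node must have been removed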
+ */ +void drm_mm_takedown(struct drm_mm *mm) +{ + if (WARN(!drm_mm_clean(mm), + "Memory manager not clean during takedown.\n")) + show_leaks(mm); +} +EXPORT_SYMBOL(drm_mm_takedown); + +static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry) +{ + u64 start, size; + + size = entry->hole_size; + if (size) { + start = drm_mm_hole_node_start(entry); + drm_printf(p, "%#018llx-%#018llx: %llu: free\n", + start, start + size, size); + } + + return size; +} +/** + * drm_mm_print - print allocator state + * @mm: drm_mm allocator to print + * @p: DRM printer to use + */ +void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p) +{ + const struct drm_mm_node *entry; + u64 total_used = 0, total_free = 0, total = 0; + + total_free += drm_mm_dump_hole(p, &mm->head_node); + + drm_mm_for_each_node(entry, mm) { + drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start, + entry->start + entry->size, entry->size); + total_used += entry->size; + total_free += drm_mm_dump_hole(p, entry); + } + total = total_free + total_used; + + drm_printf(p, "total: %llu, used %llu free %llu\n", total, + total_used, total_free); +} +EXPORT_SYMBOL(drm_mm_print); Index: sys/dev/drm/core/drm_mode_config.c =================================================================== --- /dev/null +++ sys/dev/drm/core/drm_mode_config.c @@ -0,0 +1,508 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#include + +#include +#include +#include +#include +#include + +#include "drm_crtc_internal.h" +#include "drm_internal.h" + +int drm_modeset_register_all(struct drm_device *dev) +{ + int ret; + + ret = drm_plane_register_all(dev); + if (ret) + goto err_plane; + + ret = drm_crtc_register_all(dev); + if (ret) + goto err_crtc; + + ret = drm_encoder_register_all(dev); + if (ret) + goto err_encoder; + + ret = drm_connector_register_all(dev); + if (ret) + goto err_connector; + + return 0; + +err_connector: + drm_encoder_unregister_all(dev); +err_encoder: + drm_crtc_unregister_all(dev); +err_crtc: + drm_plane_unregister_all(dev); +err_plane: + return ret; +} + +void drm_modeset_unregister_all(struct drm_device *dev) +{ + drm_connector_unregister_all(dev); + drm_encoder_unregister_all(dev); + drm_crtc_unregister_all(dev); + drm_plane_unregister_all(dev); +} + +/** + * drm_mode_getresources - get graphics configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Construct a set of configuration description structures and return + * them to the user, including CRTC, connector and framebuffer configuration. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_getresources(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_card_res *card_res = data; + struct drm_framebuffer *fb; + struct drm_connector *connector; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + int count, ret = 0; + uint32_t __user *fb_id; + uint32_t __user *crtc_id; + uint32_t __user *connector_id; + uint32_t __user *encoder_id; + struct drm_connector_list_iter conn_iter; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + mutex_lock(&file_priv->fbs_lock); + count = 0; + fb_id = u64_to_user_ptr(card_res->fb_id_ptr); + list_for_each_entry(fb, &file_priv->fbs, filp_head) { + if (count < card_res->count_fbs && + put_user(fb->base.id, fb_id + count)) { + mutex_unlock(&file_priv->fbs_lock); + return -EFAULT; + } + count++; + } + card_res->count_fbs = count; + mutex_unlock(&file_priv->fbs_lock); + + card_res->max_height = dev->mode_config.max_height; + card_res->min_height = dev->mode_config.min_height; + card_res->max_width = dev->mode_config.max_width; + card_res->min_width = dev->mode_config.min_width; + + count = 0; + crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr); + drm_for_each_crtc(crtc, dev) { + if (drm_lease_held(file_priv, crtc->base.id)) { + if (count < card_res->count_crtcs && + put_user(crtc->base.id, crtc_id + count)) + return -EFAULT; + count++; + } + } + card_res->count_crtcs = count; + + count = 0; + encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr); + drm_for_each_encoder(encoder, dev) { + if (count < card_res->count_encoders && + put_user(encoder->base.id, encoder_id + count)) + return -EFAULT; + count++; + } + card_res->count_encoders = count; + + drm_connector_list_iter_begin(dev, &conn_iter); + count = 0; + connector_id = u64_to_user_ptr(card_res->connector_id_ptr); + drm_for_each_connector_iter(connector, &conn_iter) { + /* only expose writeback connectors if userspace understands them */ + if (!file_priv->writeback_connectors && + (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)) + continue; + + if (drm_lease_held(file_priv, connector->base.id)) { + if (count < card_res->count_connectors && + put_user(connector->base.id, connector_id + count)) { + 
drm_connector_list_iter_end(&conn_iter); + return -EFAULT; + } + count++; + } + } + card_res->count_connectors = count; + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +/** + * drm_mode_config_reset - call ->reset callbacks + * @dev: drm device + * + * This function calls the ->reset callback of all planes, CRTCs, encoders and + * connectors. Drivers can use this in e.g. their driver load or resume code to + * reset hardware and software state. + */ +void drm_mode_config_reset(struct drm_device *dev) +{ + struct drm_crtc *crtc; + struct drm_plane *plane; + struct drm_encoder *encoder; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_for_each_plane(plane, dev) + if (plane->funcs->reset) + plane->funcs->reset(plane); + + drm_for_each_crtc(crtc, dev) + if (crtc->funcs->reset) + crtc->funcs->reset(crtc); + + drm_for_each_encoder(encoder, dev) + if (encoder->funcs->reset) + encoder->funcs->reset(encoder); + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) + if (connector->funcs->reset) + connector->funcs->reset(connector); + drm_connector_list_iter_end(&conn_iter); +} +EXPORT_SYMBOL(drm_mode_config_reset); + +/* + * Global properties + */ +static const struct drm_prop_enum_list drm_plane_type_enum_list[] = { + { DRM_PLANE_TYPE_OVERLAY, "Overlay" }, + { DRM_PLANE_TYPE_PRIMARY, "Primary" }, + { DRM_PLANE_TYPE_CURSOR, "Cursor" }, +}; + +static int drm_mode_create_standard_properties(struct drm_device *dev) +{ + struct drm_property *prop; + int ret; + + ret = drm_connector_create_standard_properties(dev); + if (ret) + return ret; + + prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, + "type", drm_plane_type_enum_list, + ARRAY_SIZE(drm_plane_type_enum_list)); + if (!prop) + return -ENOMEM; + dev->mode_config.plane_type_property = prop; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, + "SRC_X", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_src_x = prop; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, + "SRC_Y", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_src_y = prop; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, + "SRC_W", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_src_w = prop; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, + "SRC_H", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_src_h = prop; + + prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, + "CRTC_X", INT_MIN, INT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_crtc_x = prop; + + prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, + "CRTC_Y", INT_MIN, INT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_crtc_y = prop; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, + "CRTC_W", 0, INT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_crtc_w = prop; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, + "CRTC_H", 0, INT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_crtc_h = prop; + + prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC, + "FB_ID", DRM_MODE_OBJECT_FB); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_fb_id = prop; + + prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, + "IN_FENCE_FD", -1, INT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_in_fence_fd = prop; + + prop = drm_property_create_range(dev,
DRM_MODE_PROP_ATOMIC, + "OUT_FENCE_PTR", 0, U64_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_out_fence_ptr = prop; + + prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC, + "CRTC_ID", DRM_MODE_OBJECT_CRTC); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_crtc_id = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, + "FB_DAMAGE_CLIPS", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_fb_damage_clips = prop; + + prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, + "ACTIVE"); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_active = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, + "MODE_ID", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_mode_id = prop; + + prop = drm_property_create_bool(dev, 0, + "VRR_ENABLED"); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_vrr_enabled = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB, + "DEGAMMA_LUT", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.degamma_lut_property = prop; + + prop = drm_property_create_range(dev, + DRM_MODE_PROP_IMMUTABLE, + "DEGAMMA_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.degamma_lut_size_property = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB, + "CTM", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.ctm_property = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB, + "GAMMA_LUT", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.gamma_lut_property = prop; + + prop = drm_property_create_range(dev, + DRM_MODE_PROP_IMMUTABLE, + "GAMMA_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.gamma_lut_size_property = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB, + "IN_FORMATS", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.modifiers_property = prop; + + return 0; +} + +/** + * drm_mode_config_init - initialize DRM mode_configuration structure + * @dev: DRM device + * + * Initialize @dev's mode_config structure, used for tracking the graphics + * configuration of @dev. + * + * Since this initializes the modeset locks, no locking is possible; that is no + * problem, since this should happen single-threaded at init time. It is the + * driver's job to ensure this guarantee.
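+ *
+ * For illustration, a (hypothetical) KMS driver's load path would pair this
+ * with drm_mode_config_cleanup() roughly as follows:
+ *
+ *	drm_mode_config_init(dev);
+ *	dev->mode_config.min_width = 0;
+ *	dev->mode_config.min_height = 0;
+ *	dev->mode_config.max_width = 4096;
+ *	dev->mode_config.max_height = 4096;
+ *	// ... create planes, CRTCs, encoders and connectors ...
+ *	// on failure, and on unload: drm_mode_config_cleanup(dev);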
+ * + */ +void drm_mode_config_init(struct drm_device *dev) +{ + mutex_init(&dev->mode_config.mutex); + drm_modeset_lock_init(&dev->mode_config.connection_mutex); + mutex_init(&dev->mode_config.idr_mutex); + mutex_init(&dev->mode_config.fb_lock); + mutex_init(&dev->mode_config.blob_lock); + INIT_LIST_HEAD(&dev->mode_config.fb_list); + INIT_LIST_HEAD(&dev->mode_config.crtc_list); + INIT_LIST_HEAD(&dev->mode_config.connector_list); + INIT_LIST_HEAD(&dev->mode_config.encoder_list); + INIT_LIST_HEAD(&dev->mode_config.property_list); + INIT_LIST_HEAD(&dev->mode_config.property_blob_list); + INIT_LIST_HEAD(&dev->mode_config.plane_list); + INIT_LIST_HEAD(&dev->mode_config.privobj_list); + idr_init(&dev->mode_config.object_idr); + idr_init(&dev->mode_config.tile_idr); + ida_init(&dev->mode_config.connector_ida); + spin_lock_init(&dev->mode_config.connector_list_lock); + + init_llist_head(&dev->mode_config.connector_free_list); + INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn); + + drm_mode_create_standard_properties(dev); + + /* Just to be sure */ + dev->mode_config.num_fb = 0; + dev->mode_config.num_connector = 0; + dev->mode_config.num_crtc = 0; + dev->mode_config.num_encoder = 0; + dev->mode_config.num_total_plane = 0; +} +EXPORT_SYMBOL(drm_mode_config_init); + +/** + * drm_mode_config_cleanup - free up DRM mode_config info + * @dev: DRM device + * + * Free up all the connectors and CRTCs associated with this DRM device, then + * free up the framebuffers and associated buffer objects. + * + * Note that since this /should/ happen single-threaded at driver/device + * teardown time, no locking is required. It's the driver's job to ensure that + * this guarantee actually holds true. + * + * FIXME: cleanup any dangling user buffer objects too + */ +void drm_mode_config_cleanup(struct drm_device *dev) +{ + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct drm_crtc *crtc, *ct; + struct drm_encoder *encoder, *enct; + struct drm_framebuffer *fb, *fbt; + struct drm_property *property, *pt; + struct drm_property_blob *blob, *bt; + struct drm_plane *plane, *plt; + + list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list, + head) { + encoder->funcs->destroy(encoder); + } + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + /* drm_connector_list_iter holds a full reference to the + * current connector itself, which means it is inherently safe + * against unreferencing the current connector - but not against + * deleting it right away. */ + drm_connector_put(connector); + } + drm_connector_list_iter_end(&conn_iter); + /* connector_iter drops references in a work item.
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	struct drm_crtc *crtc, *ct;
+	struct drm_encoder *encoder, *enct;
+	struct drm_framebuffer *fb, *fbt;
+	struct drm_property *property, *pt;
+	struct drm_property_blob *blob, *bt;
+	struct drm_plane *plane, *plt;
+
+	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+				 head) {
+		encoder->funcs->destroy(encoder);
+	}
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		/* drm_connector_list_iter holds a full reference to the
+		 * current connector itself, which means it is inherently safe
+		 * against unreferencing the current connector - but not
+		 * against deleting it right away. */
+		drm_connector_put(connector);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+	/* connector_iter drops references in a work item. */
+	flush_work(&dev->mode_config.connector_free_work);
+	if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
+		drm_connector_list_iter_begin(dev, &conn_iter);
+		drm_for_each_connector_iter(connector, &conn_iter)
+			DRM_ERROR("connector %s leaked!\n", connector->name);
+		drm_connector_list_iter_end(&conn_iter);
+	}
+
+	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+				 head) {
+		drm_property_destroy(dev, property);
+	}
+
+	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+				 head) {
+		plane->funcs->destroy(plane);
+	}
+
+	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+		crtc->funcs->destroy(crtc);
+	}
+
+	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+				 head_global) {
+		drm_property_blob_put(blob);
+	}
+
+	/*
+	 * Single-threaded teardown context, so it's not required to grab the
+	 * fb_lock to protect against concurrent fb_list access. On the
+	 * contrary, it would actually deadlock with the
+	 * drm_framebuffer_cleanup function.
+	 *
+	 * Also, if there are any framebuffers left, that's a driver leak now,
+	 * so politely WARN about this.
+	 */
+	WARN_ON(!list_empty(&dev->mode_config.fb_list));
+	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+		struct drm_printer p = drm_debug_printer("[leaked fb]");
+
+		drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
+		drm_framebuffer_print_info(&p, 1, fb);
+		drm_framebuffer_free(&fb->base.refcount);
+	}
+
+	ida_destroy(&dev->mode_config.connector_ida);
+	idr_destroy(&dev->mode_config.tile_idr);
+	idr_destroy(&dev->mode_config.object_idr);
+	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
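drm_mode_config_cleanup() is the teardown mirror of drm_mode_config_init(); a
driver detach path would quiesce the hardware first and only then release the
mode objects. A hedged sketch (foo_detach is hypothetical;
drm_atomic_helper_shutdown() is part of the imported atomic helpers):

	static void
	foo_detach(struct drm_device *dev)
	{
		drm_atomic_helper_shutdown(dev);	/* disable planes/CRTCs */
		drm_mode_config_cleanup(dev);		/* then free mode objects */
	}
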
Index: sys/dev/drm/core/drm_mode_object.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_mode_object.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_print.h>
+
+#include "drm_crtc_internal.h"
+
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
+			  uint32_t obj_type, bool register_obj,
+			  void (*obj_free_cb)(struct kref *kref))
+{
+	int ret;
+
+	WARN_ON(dev->registered && !obj_free_cb);
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL,
+			1, 0, GFP_KERNEL);
+	if (ret >= 0) {
+		/*
+		 * Set up the object linking under the protection of the idr
+		 * lock so that other users can't see inconsistent state.
+		 */
+		obj->id = ret;
+		obj->type = obj_type;
+		if (obj_free_cb) {
+			obj->free_cb = obj_free_cb;
+			kref_init(&obj->refcount);
+		}
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_mode_object_add - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
+ *
+ * Create a unique identifier based on @obj in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_object_add(struct drm_device *dev,
+			struct drm_mode_object *obj, uint32_t obj_type)
+{
+	return __drm_mode_object_add(dev, obj, obj_type, true, NULL);
+}
+
+void drm_mode_object_register(struct drm_device *dev,
+			      struct drm_mode_object *obj)
+{
+	mutex_lock(&dev->mode_config.idr_mutex);
+	idr_replace(&dev->mode_config.object_idr, obj, obj->id);
+	mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+/**
+ * drm_mode_object_unregister - free a modeset identifier
+ * @dev: DRM device
+ * @object: object to free
+ *
+ * Free @id from @dev's unique identifier pool.
+ * This function can be called multiple times, and guards against
+ * multiple removals.
+ * These modeset identifiers are _not_ reference counted.  Hence don't use this
+ * for reference counted modeset objects like framebuffers.
+ */
+void drm_mode_object_unregister(struct drm_device *dev,
+				struct drm_mode_object *object)
+{
+	WARN_ON(dev->registered && !object->free_cb);
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	if (object->id) {
+		idr_remove(&dev->mode_config.object_idr, object->id);
+		object->id = 0;
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
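The register_obj/obj_free_cb split in __drm_mode_object_add() is what lets
refcounted objects (framebuffers, blobs) reserve an ID before becoming
visible to userspace. A sketch of how a refcounted object type would use it;
foo_fb and foo_fb_free are stand-ins, and the kfree() shim is assumed to come
from drmkpi (the real framebuffer code elsewhere in this patch does the
equivalent):

	static void
	foo_fb_free(struct kref *kref)
	{
		struct foo_fb *fb = container_of(kref, struct foo_fb,
		    base.refcount);

		kfree(fb);	/* assumption: drmkpi provides kfree() */
	}

	/* reserve an ID but stay invisible until drm_mode_object_register() */
	ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
	    false, foo_fb_free);
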
+/**
+ * drm_mode_object_lease_required - check types which must be leased to be used
+ * @type: type of object
+ *
+ * Returns whether the provided type of drm_mode_object must
+ * be owned or leased to be used by a process.
+ */
+bool drm_mode_object_lease_required(uint32_t type)
+{
+	switch (type) {
+	case DRM_MODE_OBJECT_CRTC:
+	case DRM_MODE_OBJECT_CONNECTOR:
+	case DRM_MODE_OBJECT_PLANE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+					       struct drm_file *file_priv,
+					       uint32_t id, uint32_t type)
+{
+	struct drm_mode_object *obj = NULL;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	obj = idr_find(&dev->mode_config.object_idr, id);
+	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+		obj = NULL;
+	if (obj && obj->id != id)
+		obj = NULL;
+
+	if (obj && drm_mode_object_lease_required(obj->type) &&
+	    !_drm_lease_held(file_priv, obj->id))
+		obj = NULL;
+
+	if (obj && obj->free_cb) {
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return obj;
+}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @file_priv: drm file
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * This function is used to look up a modeset object.  It will acquire a
+ * reference for reference counted objects.  This reference must be dropped
+ * again by calling drm_mode_object_put().
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+					     struct drm_file *file_priv,
+					     uint32_t id, uint32_t type)
+{
+	struct drm_mode_object *obj = NULL;
+
+	obj = __drm_mode_object_find(dev, file_priv, id, type);
+	return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_mode_object_put - release a mode object reference
+ * @obj: DRM mode object
+ *
+ * This function decrements the object's refcount if it is a refcounted modeset
+ * object.  It is a no-op on any other object.  This is used to drop references
+ * acquired with drm_mode_object_get().
+ */
+void drm_mode_object_put(struct drm_mode_object *obj)
+{
+	if (obj->free_cb) {
+		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
+		kref_put(&obj->refcount, obj->free_cb);
+	}
+}
+EXPORT_SYMBOL(drm_mode_object_put);
+
+/**
+ * drm_mode_object_get - acquire a mode object reference
+ * @obj: DRM mode object
+ *
+ * This function increments the object's refcount if it is a refcounted modeset
+ * object.  It is a no-op on any other object.  References should be dropped
+ * again by calling drm_mode_object_put().
+ */
+void drm_mode_object_get(struct drm_mode_object *obj)
+{
+	if (obj->free_cb) {
+		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
+		kref_get(&obj->refcount);
+	}
+}
+EXPORT_SYMBOL(drm_mode_object_get);
+
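Callers that look up an object by a userspace-supplied ID pair the find with
a put once they are done; for non-refcounted objects both calls degrade to
no-ops, so the pattern is safe everywhere. Typical shape (id arrives from an
ioctl argument; obj_to_crtc() is the real upcast macro):

	struct drm_mode_object *mo;

	mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC);
	if (mo == NULL)
		return (-ENOENT);
	/* ... use obj_to_crtc(mo) while the reference is held ... */
	drm_mode_object_put(mo);
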
Please " + "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time " + "you see this message on the same object type.\n", + obj->type); + return; + } + + obj->properties->properties[count] = property; + obj->properties->values[count] = init_val; + obj->properties->count++; +} +EXPORT_SYMBOL(drm_object_attach_property); + +/** + * drm_object_property_set_value - set the value of a property + * @obj: drm mode object to set property value for + * @property: property to set + * @val: value the property should be set to + * + * This function sets a given property on a given object. This function only + * changes the software state of the property, it does not call into the + * driver's ->set_property callback. + * + * Note that atomic drivers should not have any need to call this, the core will + * ensure consistency of values reported back to userspace through the + * appropriate ->atomic_get_property callback. Only legacy drivers should call + * this function to update the tracked value (after clamping and other + * restrictions have been applied). + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_object_property_set_value(struct drm_mode_object *obj, + struct drm_property *property, uint64_t val) +{ + int i; + + WARN_ON(drm_drv_uses_atomic_modeset(property->dev) && + !(property->flags & DRM_MODE_PROP_IMMUTABLE)); + + for (i = 0; i < obj->properties->count; i++) { + if (obj->properties->properties[i] == property) { + obj->properties->values[i] = val; + return 0; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL(drm_object_property_set_value); + +static int __drm_object_property_get_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t *val) +{ + int i; + + /* read-only properties bypass atomic mechanism and still store + * their value in obj->properties->values[].. mostly to avoid + * having to deal w/ EDID and similar props in atomic paths: + */ + if (drm_drv_uses_atomic_modeset(property->dev) && + !(property->flags & DRM_MODE_PROP_IMMUTABLE)) + return drm_atomic_get_property(obj, property, val); + + for (i = 0; i < obj->properties->count; i++) { + if (obj->properties->properties[i] == property) { + *val = obj->properties->values[i]; + return 0; + } + + } + + return -EINVAL; +} + +/** + * drm_object_property_get_value - retrieve the value of a property + * @obj: drm mode object to get property value from + * @property: property to retrieve + * @val: storage for the property value + * + * This function retrieves the softare state of the given property for the given + * property. Since there is no driver callback to retrieve the current property + * value this might be out of sync with the hardware, depending upon the driver + * and property. + * + * Atomic drivers should never call this function directly, the core will read + * out property values through the various ->atomic_get_property callbacks. + * + * Returns: + * Zero on success, error code on failure. 
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the
+ * given object.  Since there is no driver callback to retrieve the current
+ * property value this might be out of sync with the hardware, depending upon
+ * the driver and property.
+ *
+ * Atomic drivers should never call this function directly, the core will
+ * read out property values through the various ->atomic_get_property
+ * callbacks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_get_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t *val)
+{
+	WARN_ON(drm_drv_uses_atomic_modeset(property->dev));
+
+	return __drm_object_property_get_value(obj, property, val);
+}
+EXPORT_SYMBOL(drm_object_property_get_value);
+
+/* helper for getconnector and getproperties ioctls */
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+				   uint32_t __user *prop_ptr,
+				   uint64_t __user *prop_values,
+				   uint32_t *arg_count_props)
+{
+	int i, ret, count;
+
+	for (i = 0, count = 0; i < obj->properties->count; i++) {
+		struct drm_property *prop = obj->properties->properties[i];
+		uint64_t val;
+
+		if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
+			continue;
+
+		if (*arg_count_props > count) {
+			ret = __drm_object_property_get_value(obj, prop, &val);
+			if (ret)
+				return ret;
+
+			if (put_user(prop->base.id, prop_ptr + count))
+				return -EFAULT;
+
+			if (put_user(val, prop_values + count))
+				return -EFAULT;
+		}
+
+		count++;
+	}
+	*arg_count_props = count;
+
+	return 0;
+}
+
+/**
+ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current value for an object's property.
+ * Compared to the connector specific ioctl this one is extended to also work
+ * on crtc and plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	struct drm_mode_obj_get_properties *arg = data;
+	struct drm_mode_object *obj;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EOPNOTSUPP;
+
+	drm_modeset_lock_all(dev);
+
+	obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
+	if (!obj) {
+		ret = -ENOENT;
+		goto out;
+	}
+	if (!obj->properties) {
+		ret = -EINVAL;
+		goto out_unref;
+	}
+
+	ret = drm_mode_object_get_properties(obj, file_priv->atomic,
+			(uint32_t __user *)(unsigned long)(arg->props_ptr),
+			(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
+			&arg->count_props);
+
+out_unref:
+	drm_mode_object_put(obj);
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+					       uint32_t prop_id)
+{
+	int i;
+
+	for (i = 0; i < obj->properties->count; i++)
+		if (obj->properties->properties[i]->base.id == prop_id)
+			return obj->properties->properties[i];
+
+	return NULL;
+}
+
+static int set_property_legacy(struct drm_mode_object *obj,
+			       struct drm_property *prop,
+			       uint64_t prop_value)
+{
+	struct drm_device *dev = prop->dev;
+	struct drm_mode_object *ref;
+	int ret = -EINVAL;
+
+	if (!drm_property_change_valid_get(prop, prop_value, &ref))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	switch (obj->type) {
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = drm_connector_set_obj_prop(obj, prop, prop_value);
+		break;
+	case DRM_MODE_OBJECT_CRTC:
+		ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value);
+		break;
+	case DRM_MODE_OBJECT_PLANE:
+		ret = drm_mode_plane_set_obj_prop(obj_to_plane(obj),
+						  prop, prop_value);
+		break;
+	}
+	drm_property_change_valid_put(prop, ref);
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+
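Userspace reaches drm_mode_obj_get_properties_ioctl() through the libdrm
wrapper, which hides the two-pass count/fill protocol of the helper above
behind a single call. A minimal consumer, assuming an already-open DRM fd and
a valid crtc_id:

	#include <inttypes.h>
	#include <stdio.h>
	#include <xf86drmMode.h>

	drmModeObjectPropertiesPtr props;

	props = drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (props != NULL) {
		for (uint32_t i = 0; i < props->count_props; i++)
			printf("prop %u = %" PRIu64 "\n", props->props[i],
			    props->prop_values[i]);
		drmModeFreeObjectProperties(props);
	}
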
+static int set_property_atomic(struct drm_mode_object *obj,
+			       struct drm_file *file_priv,
+			       struct drm_property *prop,
+			       uint64_t prop_value)
+{
+	struct drm_device *dev = prop->dev;
+	struct drm_atomic_state *state;
+	struct drm_modeset_acquire_ctx ctx;
+	int ret;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	drm_modeset_acquire_init(&ctx, 0);
+	state->acquire_ctx = &ctx;
+
+retry:
+	if (prop == state->dev->mode_config.dpms_property) {
+		if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ret = drm_atomic_connector_commit_dpms(state,
+						       obj_to_connector(obj),
+						       prop_value);
+	} else {
+		ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
+		if (ret)
+			goto out;
+		ret = drm_atomic_commit(state);
+	}
+out:
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	drm_atomic_state_put(state);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	return ret;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_mode_obj_set_property *arg = data;
+	struct drm_mode_object *arg_obj;
+	struct drm_property *property;
+	int ret = -EINVAL;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EOPNOTSUPP;
+
+	arg_obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
+	if (!arg_obj)
+		return -ENOENT;
+
+	if (!arg_obj->properties)
+		goto out_unref;
+
+	property = drm_mode_obj_find_prop_id(arg_obj, arg->prop_id);
+	if (!property)
+		goto out_unref;
+
+	if (drm_drv_uses_atomic_modeset(property->dev))
+		ret = set_property_atomic(arg_obj, file_priv, property, arg->value);
+	else
+		ret = set_property_legacy(arg_obj, property, arg->value);
+
+out_unref:
+	drm_mode_object_put(arg_obj);
+	return ret;
+}
Index: sys/dev/drm/core/drm_modes.c
===================================================================
--- /dev/null
+++ sys/dev/drm/core/drm_modes.c
@@ -0,0 +1,2105 @@
+/*
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger  aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/export.h>
+
+#include