Index: head/sys/dev/drm2/drm_agpsupport.c =================================================================== --- head/sys/dev/drm2/drm_agpsupport.c (revision 312927) +++ head/sys/dev/drm2/drm_agpsupport.c (revision 312928) @@ -1,466 +1,468 @@ /** * \file drm_agpsupport.c * DRM support for AGP/GART backend * * \author Rickard E. (Rik) Faith * \author Gareth Hughes */ /* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include -#include #if __OS_HAS_AGP /** * Get AGP information. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a (output) drm_agp_info structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device has been initialized and acquired and fills in the * drm_agp_info structure with the information in drm_agp_head::agp_info. */ int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info) { DRM_AGP_KERN *kern; if (!dev->agp || !dev->agp->acquired) return -EINVAL; kern = &dev->agp->agp_info; agp_get_info(dev->agp->bridge, kern); info->agp_version_major = 1; info->agp_version_minor = 0; info->mode = kern->ai_mode; info->aperture_base = kern->ai_aperture_base; info->aperture_size = kern->ai_aperture_size; info->memory_allowed = kern->ai_memory_allowed; info->memory_used = kern->ai_memory_used; info->id_vendor = kern->ai_devid & 0xffff; info->id_device = kern->ai_devid >> 16; return 0; } EXPORT_SYMBOL(drm_agp_info); int drm_agp_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_info *info = data; int err; err = drm_agp_info(dev, info); if (err) return err; return 0; } /** * Acquire the AGP device. * * \param dev DRM device that is to acquire AGP. * \return zero on success or a negative number on failure. * * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ int drm_agp_acquire(struct drm_device * dev) { int retcode; if (!dev->agp) return -ENODEV; if (dev->agp->acquired) return -EBUSY; retcode = agp_acquire(dev->agp->bridge); if (retcode) return -retcode; dev->agp->acquired = 1; return 0; } EXPORT_SYMBOL(drm_agp_acquire); /** * Acquire the AGP device (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. 
* \return zero on success or a negative number on failure. * * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { return drm_agp_acquire((struct drm_device *) file_priv->minor->dev); } /** * Release the AGP device. * * \param dev DRM device that is to release AGP. * \return zero on success or a negative number on failure. * * Verifies the AGP device has been acquired and calls \c agp_backend_release. */ int drm_agp_release(struct drm_device * dev) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; agp_release(dev->agp->bridge); dev->agp->acquired = 0; return 0; } EXPORT_SYMBOL(drm_agp_release); int drm_agp_release_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { return drm_agp_release(dev); } /** * Enable the AGP bus. * * \param dev DRM device that has previously acquired AGP. * \param mode Requested AGP mode. * \return zero on success or a negative number on failure. * * Verifies the AGP device has been acquired but not enabled, and calls * \c agp_enable. */ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; dev->agp->mode = mode.mode; agp_enable(dev->agp->bridge, mode.mode); dev->agp->enabled = 1; return 0; } EXPORT_SYMBOL(drm_agp_enable); int drm_agp_enable_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_mode *mode = data; return drm_agp_enable(dev, *mode); } /** * Allocate AGP memory. * * \param inode device inode. * \param file_priv file private pointer. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired, allocates the * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it. */ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { struct drm_agp_mem *entry; DRM_AGP_MEM *memory; unsigned long pages; u32 type; struct agp_memory_info info; if (!dev->agp || !dev->agp->acquired) return -EINVAL; - if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL))) + if (!(entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT))) return -ENOMEM; + memset(entry, 0, sizeof(*entry)); + pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; type = (u32) request->type; if (!(memory = agp_alloc_memory(dev->agp->bridge, type, pages << PAGE_SHIFT))) { - kfree(entry); + free(entry, DRM_MEM_AGPLISTS); return -ENOMEM; } entry->handle = (unsigned long)memory; entry->memory = memory; entry->bound = 0; entry->pages = pages; list_add(&entry->head, &dev->agp->memory); agp_memory_info(dev->agp->bridge, entry->memory, &info); request->handle = entry->handle; request->physical = info.ami_physical; return 0; } EXPORT_SYMBOL(drm_agp_alloc); int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_buffer *request = data; return drm_agp_alloc(dev, request); } /** * Search for the AGP memory entry associated with a handle. * * \param dev DRM device structure. * \param handle AGP memory handle. * \return pointer to the drm_agp_mem structure associated with \p handle. * * Walks through drm_agp_head::memory until finding a matching handle. 
*/ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, unsigned long handle) { struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == handle) return entry; } return NULL; } /** * Unbind AGP memory from the GATT (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and acquired, looks-up the AGP memory * entry and passes it to the unbind_agp() function. */ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) { struct drm_agp_mem *entry; int ret; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (!(entry = drm_agp_lookup_entry(dev, request->handle))) return -EINVAL; if (!entry->bound) return -EINVAL; ret = drm_unbind_agp(entry->memory); if (ret == 0) entry->bound = 0; return ret; } EXPORT_SYMBOL(drm_agp_unbind); int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_binding *request = data; return drm_agp_unbind(dev, request); } /** * Bind AGP memory into the GATT (ioctl) * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired and that no memory * is currently bound into the GATT. Looks-up the AGP memory entry and passes * it to bind_agp() function. */ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) { struct drm_agp_mem *entry; int retcode; int page; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (!(entry = drm_agp_lookup_entry(dev, request->handle))) return -EINVAL; if (entry->bound) return -EINVAL; page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE; if ((retcode = drm_bind_agp(entry->memory, page))) return retcode; entry->bound = dev->agp->base + (page << PAGE_SHIFT); DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", dev->agp->base, entry->bound); return 0; } EXPORT_SYMBOL(drm_agp_bind); int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_binding *request = data; return drm_agp_bind(dev, request); } /** * Free AGP memory (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired and looks up the * AGP memory entry. If the memory it's currently bound, unbind it via * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. */ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) { struct drm_agp_mem *entry; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (!(entry = drm_agp_lookup_entry(dev, request->handle))) return -EINVAL; if (entry->bound) drm_unbind_agp(entry->memory); list_del(&entry->head); drm_free_agp(entry->memory, entry->pages); - kfree(entry); + free(entry, DRM_MEM_AGPLISTS); return 0; } EXPORT_SYMBOL(drm_agp_free); int drm_agp_free_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_buffer *request = data; return drm_agp_free(dev, request); } /** * Initialize the AGP resources. 
* * \return pointer to a drm_agp_head structure. * * Gets the drm_agp_t structure which is made available by the agpgart module * via the inter_module_* functions. Creates and initializes a drm_agp_head * structure. */ struct drm_agp_head *drm_agp_init(struct drm_device *dev) { struct drm_agp_head *head = NULL; - if (!(head = kzalloc(sizeof(*head), GFP_KERNEL))) + if (!(head = malloc(sizeof(*head), DRM_MEM_AGPLISTS, M_NOWAIT))) return NULL; + memset((void *)head, 0, sizeof(*head)); head->bridge = agp_find_device(); if (!head->bridge) { - kfree(head); + free(head, DRM_MEM_AGPLISTS); return NULL; } else { agp_get_info(head->bridge, &head->agp_info); } INIT_LIST_HEAD(&head->memory); head->cant_use_aperture = 0; head->base = head->agp_info.ai_aperture_base; return head; } #ifdef FREEBSD_NOTYET /** * Binds a collection of pages into AGP memory at the given offset, returning * the AGP memory structure containing them. * * No reference is held on the pages during this time -- it is up to the * caller to handle that. */ DRM_AGP_MEM * drm_agp_bind_pages(struct drm_device *dev, struct page **pages, unsigned long num_pages, uint32_t gtt_offset, u32 type) { DRM_AGP_MEM *mem; int ret, i; DRM_DEBUG("\n"); mem = agp_allocate_memory(dev->agp->bridge, num_pages, type); if (mem == NULL) { DRM_ERROR("Failed to allocate memory for %ld pages\n", num_pages); return NULL; } for (i = 0; i < num_pages; i++) mem->pages[i] = pages[i]; mem->page_count = num_pages; mem->is_flushed = true; ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE); if (ret != 0) { DRM_ERROR("Failed to bind AGP memory: %d\n", ret); agp_free_memory(mem); return NULL; } return mem; } EXPORT_SYMBOL(drm_agp_bind_pages); #endif /* FREEBSD_NOTYET */ #endif /* __OS_HAS_AGP */ Index: head/sys/dev/drm2/drm_drv.c =================================================================== --- head/sys/dev/drm2/drm_drv.c (revision 312927) +++ head/sys/dev/drm2/drm_drv.c (revision 312928) @@ -1,512 +1,511 @@ /** * \file drm_drv.c * Generic driver template * * \author Rickard E. (Rik) Faith * \author Gareth Hughes * * To use this template, you must at least define the following (samples * given for the MGA driver): * * \code * #define DRIVER_AUTHOR "VA Linux Systems, Inc." * * #define DRIVER_NAME "mga" * #define DRIVER_DESC "Matrox G200/G400" * #define DRIVER_DATE "20001127" * * #define drm_x mga_##x * \endcode */ /* * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com * * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include -#include #include #include #include struct sx drm_global_mutex; /** Ioctl table */ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), /* The DRM_IOCTL_DMA ioctl should be defined by the driver. 
*/ DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #if __OS_HAS_AGP DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #endif DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), #ifdef FREEBSD_NOTYET DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), #endif /* FREEBSD_NOTYET */ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, 
drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; #ifdef COMPAT_FREEBSD32 extern struct drm_ioctl_desc drm_compat_ioctls[]; #endif #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) /** * Take down the DRM device. * * \param dev DRM device structure. * * Frees every resource in \p dev. * * \sa drm_device */ int drm_lastclose(struct drm_device * dev) { #ifdef __linux__ struct drm_vma_entry *vma, *vma_temp; #endif DRM_DEBUG("\n"); if (dev->driver->lastclose) dev->driver->lastclose(dev); DRM_DEBUG("driver lastclose completed\n"); if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET)) drm_irq_uninstall(dev); DRM_LOCK(dev); /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp && !drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. */ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { if (entry->bound) drm_unbind_agp(entry->memory); drm_free_agp(entry->memory, entry->pages); - kfree(entry); + free(entry, DRM_MEM_AGPLISTS); } INIT_LIST_HEAD(&dev->agp->memory); if (dev->agp->acquired) drm_agp_release(dev); dev->agp->acquired = 0; dev->agp->enabled = 0; } if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && !drm_core_check_feature(dev, DRIVER_MODESET)) { drm_sg_cleanup(dev->sg); dev->sg = NULL; } #ifdef __linux__ /* Clear vma list (only built for debugging) */ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { list_del(&vma->head); kfree(vma); } #endif if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET)) drm_dma_takedown(dev); DRM_UNLOCK(dev); DRM_DEBUG("lastclose completed\n"); return 0; } #ifdef __linux__ /** File operations structure */ static const struct file_operations drm_stub_fops = { .owner = THIS_MODULE, .open = drm_stub_open, .llseek = noop_llseek, }; #endif static int __init drm_core_init(void) { sx_init(&drm_global_mutex, "drm_global_mutex"); drm_global_init(); #if DRM_LINUX linux_ioctl_register_handler(&drm_handler); #endif /* DRM_LINUX */ DRM_INFO("Initialized %s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); return 0; } static void __exit drm_core_exit(void) { #if DRM_LINUX linux_ioctl_unregister_handler(&drm_handler); #endif /* DRM_LINUX */ drm_global_release(); sx_destroy(&drm_global_mutex); } SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_core_init, NULL); SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_core_exit, NULL); /** * Copy and IOCTL return string to user space */ static int drm_copy_field(char *buf, size_t *buf_len, const char *value) { int len; /* don't overflow userbuf */ 
len = strlen(value); if (len > *buf_len) len = *buf_len; /* let userspace know exact length of driver value (which could be * larger than the userspace-supplied buffer) */ *buf_len = strlen(value); /* finally, try filling in the userbuf */ if (len && buf) if (copy_to_user(buf, value, len)) return -EFAULT; return 0; } /** * Get version information * * \param inode device inode. * \param filp file pointer. * \param cmd command. * \param arg user argument, pointing to a drm_version structure. * \return zero on success or negative number on failure. * * Fills in the version information in \p arg. */ int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_version *version = data; int err; version->version_major = dev->driver->major; version->version_minor = dev->driver->minor; version->version_patchlevel = dev->driver->patchlevel; err = drm_copy_field(version->name, &version->name_len, dev->driver->name); if (!err) err = drm_copy_field(version->date, &version->date_len, dev->driver->date); if (!err) err = drm_copy_field(version->desc, &version->desc_len, dev->driver->desc); return err; } /** * Called whenever a process performs an ioctl on /dev/drm. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. * * Looks up the ioctl function in the ::ioctls table, checking for root * previleges if so required, and dispatches to the respective function. */ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, DRM_STRUCTPROC *p) { struct drm_file *file_priv; struct drm_device *dev; struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode; dev = drm_get_device_from_kdev(kdev); retcode = devfs_get_cdevpriv((void **)&file_priv); if (retcode != 0) { DRM_ERROR("can't find authenticator\n"); return EINVAL; } retcode = -EINVAL; atomic_inc(&dev->ioctl_count); atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n", DRM_CURRENTPID, cmd, nr, (long)file_priv->minor->device, file_priv->authenticated); switch (cmd) { case FIONBIO: case FIOASYNC: atomic_dec(&dev->ioctl_count); return 0; case FIOSETOWN: atomic_dec(&dev->ioctl_count); return fsetown(*(int *)data, &file_priv->minor->buf_sigio); case FIOGETOWN: atomic_dec(&dev->ioctl_count); *(int *) data = fgetown(&file_priv->minor->buf_sigio); return 0; } if (IOCGROUP(cmd) != DRM_IOCTL_BASE) { atomic_dec(&dev->ioctl_count); DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd)); return EINVAL; } if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) goto err_i1; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32) && (nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && (nr < DRM_COMMAND_BASE + *dev->driver->num_compat_ioctls) && (dev->driver->compat_ioctls[nr - DRM_COMMAND_BASE].func != NULL)) { ioctl = &dev->driver->compat_ioctls[nr - DRM_COMMAND_BASE]; } else #endif if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; } else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { #ifdef COMPAT_FREEBSD32 /* * Called whenever a 32-bit process running under a 64-bit * kernel performs an ioctl on /dev/drm. 
*/ if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL) /* * Assume that ioctls without an explicit compat * routine will just work. This may not always be a * good assumption, but it's better than always * failing. */ ioctl = &drm_compat_ioctls[nr]; else #endif ioctl = &drm_ioctls[nr]; } else goto err_i1; /* Do not trust userspace, use our own definition */ func = ioctl->func; /* is there a local override? */ #ifdef FREEBSD_NOTYET #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32) && (nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_compat_ioctl) func = dev->driver->dma_compat_ioctl; else #endif #endif /* FREEBSD_NOTYET */ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) func = dev->driver->dma_ioctl; if (!func) { DRM_DEBUG("no function\n"); retcode = -EINVAL; } else if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) || ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) || (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) { retcode = -EACCES; } else { if (ioctl->flags & DRM_UNLOCKED) retcode = func(dev, data, file_priv); else { sx_xlock(&drm_global_mutex); retcode = func(dev, data, file_priv); sx_xunlock(&drm_global_mutex); } } err_i1: atomic_dec(&dev->ioctl_count); if (retcode == -ERESTARTSYS) { /* * FIXME: Find where in i915 ERESTARTSYS should be * converted to EINTR. */ DRM_DEBUG("ret = %d -> %d\n", retcode, -EINTR); retcode = -EINTR; } if (retcode) DRM_DEBUG("ret = %d\n", retcode); if (retcode != 0 && (drm_debug & DRM_DEBUGBITS_FAILED_IOCTL) != 0) { printf( "pid %d, cmd 0x%02lx, nr 0x%02x, dev 0x%lx, auth %d, res %d\n", DRM_CURRENTPID, cmd, nr, (long)file_priv->minor->device, file_priv->authenticated, -retcode); } return -retcode; } EXPORT_SYMBOL(drm_ioctl); struct drm_local_map *drm_getsarea(struct drm_device *dev) { struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && entry->map->type == _DRM_SHM && (entry->map->flags & _DRM_CONTAINS_LOCK)) { return entry->map; } } return NULL; } EXPORT_SYMBOL(drm_getsarea); Index: head/sys/dev/drm2/drm_os_freebsd.c =================================================================== --- head/sys/dev/drm2/drm_os_freebsd.c (revision 312927) +++ head/sys/dev/drm2/drm_os_freebsd.c (revision 312928) @@ -1,499 +1,499 @@ #include __FBSDID("$FreeBSD$"); #include #include #include devclass_t drm_devclass; MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures"); MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures"); MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures"); MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures"); MALLOC_DEFINE(DRM_MEM_MINOR, "drm_minor", "DRM MINOR Data Structures"); MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures"); MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures"); MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures"); MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures"); MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures"); MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures"); MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures"); MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures"); MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures"); MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data 
Structures"); +MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures"); MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap", "DRM CTXBITMAP Data Structures"); MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures"); MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures"); MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures"); MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures"); MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data"); const char *fb_mode_option = NULL; #define NSEC_PER_USEC 1000L #define NSEC_PER_SEC 1000000000L int64_t timeval_to_ns(const struct timeval *tv) { return ((int64_t)tv->tv_sec * NSEC_PER_SEC) + tv->tv_usec * NSEC_PER_USEC; } struct timeval ns_to_timeval(const int64_t nsec) { struct timeval tv; long rem; if (nsec == 0) { tv.tv_sec = 0; tv.tv_usec = 0; return (tv); } tv.tv_sec = nsec / NSEC_PER_SEC; rem = nsec % NSEC_PER_SEC; if (rem < 0) { tv.tv_sec--; rem += NSEC_PER_SEC; } tv.tv_usec = rem / 1000; return (tv); } /* Copied from OFED. */ unsigned long drm_linux_timer_hz_mask; static void drm_linux_timer_init(void *arg) { /* * Compute an internal HZ value which can divide 2**32 to * avoid timer rounding problems when the tick value wraps * around 2**32: */ drm_linux_timer_hz_mask = 1; while (drm_linux_timer_hz_mask < (unsigned long)hz) drm_linux_timer_hz_mask *= 2; drm_linux_timer_hz_mask--; } SYSINIT(drm_linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, drm_linux_timer_init, NULL); static const drm_pci_id_list_t * drm_find_description(int vendor, int device, const drm_pci_id_list_t *idlist) { int i = 0; for (i = 0; idlist[i].vendor != 0; i++) { if ((idlist[i].vendor == vendor) && ((idlist[i].device == device) || (idlist[i].device == 0))) { return (&idlist[i]); } } return (NULL); } /* * drm_probe_helper: called by a driver at the end of its probe * method. */ int drm_probe_helper(device_t kdev, const drm_pci_id_list_t *idlist) { const drm_pci_id_list_t *id_entry; int vendor, device; vendor = pci_get_vendor(kdev); device = pci_get_device(kdev); if (pci_get_class(kdev) != PCIC_DISPLAY || (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA && pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER)) return (-ENXIO); id_entry = drm_find_description(vendor, device, idlist); if (id_entry != NULL) { if (device_get_desc(kdev) == NULL) { DRM_DEBUG("%s desc: %s\n", device_get_nameunit(kdev), id_entry->name); device_set_desc(kdev, id_entry->name); } return (0); } return (-ENXIO); } /* * drm_attach_helper: called by a driver at the end of its attach * method. 
*/ int drm_attach_helper(device_t kdev, const drm_pci_id_list_t *idlist, struct drm_driver *driver) { struct drm_device *dev; int vendor, device; int ret; dev = device_get_softc(kdev); vendor = pci_get_vendor(kdev); device = pci_get_device(kdev); dev->id_entry = drm_find_description(vendor, device, idlist); ret = drm_get_pci_dev(kdev, dev, driver); return (ret); } int drm_generic_suspend(device_t kdev) { struct drm_device *dev; int error; DRM_DEBUG_KMS("Starting suspend\n"); dev = device_get_softc(kdev); if (dev->driver->suspend) { pm_message_t state; state.event = PM_EVENT_SUSPEND; error = -dev->driver->suspend(dev, state); if (error) goto out; } error = bus_generic_suspend(kdev); out: DRM_DEBUG_KMS("Finished suspend: %d\n", error); return error; } int drm_generic_resume(device_t kdev) { struct drm_device *dev; int error; DRM_DEBUG_KMS("Starting resume\n"); dev = device_get_softc(kdev); if (dev->driver->resume) { error = -dev->driver->resume(dev); if (error) goto out; } error = bus_generic_resume(kdev); out: DRM_DEBUG_KMS("Finished resume: %d\n", error); return error; } int drm_generic_detach(device_t kdev) { struct drm_device *dev; int i; dev = device_get_softc(kdev); drm_put_dev(dev); /* Clean up PCI resources allocated by drm_bufs.c. We're not really * worried about resource consumption while the DRM is inactive (between * lastclose and firstopen or unload) because these aren't actually * taking up KVA, just keeping the PCI resource allocated. */ for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) { if (dev->pcir[i] == NULL) continue; bus_release_resource(dev->dev, SYS_RES_MEMORY, dev->pcirid[i], dev->pcir[i]); dev->pcir[i] = NULL; } if (pci_disable_busmaster(dev->dev)) DRM_ERROR("Request to disable bus-master failed.\n"); return (0); } int drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top) { struct sysctl_oid *oid; snprintf(dev->busid_str, sizeof(dev->busid_str), "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid", CTLFLAG_RD, dev->busid_str, 0, NULL); if (oid == NULL) return (-ENOMEM); dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0; oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL); if (oid == NULL) return (-ENOMEM); return (0); } static int drm_device_find_capability(struct drm_device *dev, int cap) { return (pci_find_cap(dev->dev, cap, NULL) == 0); } int drm_pci_device_is_agp(struct drm_device *dev) { if (dev->driver->device_is_agp != NULL) { int ret; /* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely * AGP, 2 = fall back to PCI capability */ ret = (*dev->driver->device_is_agp)(dev); if (ret != DRM_MIGHT_BE_AGP) return ret; } return (drm_device_find_capability(dev, PCIY_AGP)); } int drm_pci_device_is_pcie(struct drm_device *dev) { return (drm_device_find_capability(dev, PCIY_EXPRESS)); } static bool dmi_found(const struct dmi_system_id *dsi) { char *hw_vendor, *hw_prod; int i, slot; bool res; hw_vendor = kern_getenv("smbios.planar.maker"); hw_prod = kern_getenv("smbios.planar.product"); res = true; for (i = 0; i < nitems(dsi->matches); i++) { slot = dsi->matches[i].slot; switch (slot) { case DMI_NONE: break; case DMI_SYS_VENDOR: case DMI_BOARD_VENDOR: if (hw_vendor != NULL && !strcmp(hw_vendor, dsi->matches[i].substr)) { break; } else { res = false; goto out; } case DMI_PRODUCT_NAME: case DMI_BOARD_NAME: if (hw_prod != NULL && 
!strcmp(hw_prod, dsi->matches[i].substr)) { break; } else { res = false; goto out; } default: res = false; goto out; } } out: freeenv(hw_vendor); freeenv(hw_prod); return (res); } bool dmi_check_system(const struct dmi_system_id *sysid) { const struct dmi_system_id *dsi; bool res; for (res = false, dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) { if (dmi_found(dsi)) { res = true; if (dsi->callback != NULL && dsi->callback(dsi)) break; } } return (res); } #if __OS_HAS_MTRR int drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags) { int act; struct mem_range_desc mrdesc; mrdesc.mr_base = offset; mrdesc.mr_len = size; mrdesc.mr_flags = flags; act = MEMRANGE_SET_UPDATE; strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner)); return (-mem_range_attr_set(&mrdesc, &act)); } int drm_mtrr_del(int handle __unused, unsigned long offset, unsigned long size, unsigned int flags) { int act; struct mem_range_desc mrdesc; mrdesc.mr_base = offset; mrdesc.mr_len = size; mrdesc.mr_flags = flags; act = MEMRANGE_SET_REMOVE; strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner)); return (-mem_range_attr_set(&mrdesc, &act)); } #endif void drm_clflush_pages(vm_page_t *pages, unsigned long num_pages) { #if defined(__i386__) || defined(__amd64__) pmap_invalidate_cache_pages(pages, num_pages); #else DRM_ERROR("drm_clflush_pages not implemented on this architecture"); #endif } void drm_clflush_virt_range(char *addr, unsigned long length) { #if defined(__i386__) || defined(__amd64__) pmap_invalidate_cache_range((vm_offset_t)addr, (vm_offset_t)addr + length, TRUE); #else DRM_ERROR("drm_clflush_virt_range not implemented on this architecture"); #endif } void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii __unused) { int i, j, c; i = j = 0; while (i < len && j <= linebuflen) { c = ((const char *)buf)[i]; if (i != 0) { if (i % rowsize == 0) { /* Newline required. */ sprintf(linebuf + j, "\n"); ++j; } else if (i % groupsize == 0) { /* Space required. 
*/ sprintf(linebuf + j, " "); ++j; } } if (j > linebuflen - 4) break; sprintf(linebuf + j, "%02X", c); j += 2; ++i; } if (j <= linebuflen) sprintf(linebuf + j, "\n"); } #if DRM_LINUX #include MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1); #define LINUX_IOCTL_DRM_MIN 0x6400 #define LINUX_IOCTL_DRM_MAX 0x64ff static linux_ioctl_function_t drm_linux_ioctl; static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl, LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX}; /* The bits for in/out are switched on Linux */ #define LINUX_IOC_IN IOC_OUT #define LINUX_IOC_OUT IOC_IN static int drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args) { int error; int cmd = args->cmd; args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT); if (cmd & LINUX_IOC_IN) args->cmd |= IOC_IN; if (cmd & LINUX_IOC_OUT) args->cmd |= IOC_OUT; error = ioctl(p, (struct ioctl_args *)args); return error; } #endif /* DRM_LINUX */ static int drm_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: TUNABLE_INT_FETCH("drm.debug", &drm_debug); TUNABLE_INT_FETCH("drm.notyet", &drm_notyet); break; } return (0); } static moduledata_t drm_mod = { "drmn", drm_modevent, 0 }; DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); MODULE_VERSION(drmn, 1); MODULE_DEPEND(drmn, agp, 1, 1, 1); MODULE_DEPEND(drmn, pci, 1, 1, 1); MODULE_DEPEND(drmn, mem, 1, 1, 1); -MODULE_DEPEND(drmn, linuxkpi, 1, 1, 1); MODULE_DEPEND(drmn, iicbus, 1, 1, 1); Index: head/sys/dev/drm2/drm_os_freebsd.h =================================================================== --- head/sys/dev/drm2/drm_os_freebsd.h (revision 312927) +++ head/sys/dev/drm2/drm_os_freebsd.h (revision 312928) @@ -1,703 +1,704 @@ /** * \file drm_os_freebsd.h * OS abstraction macros. */ #include __FBSDID("$FreeBSD$"); #ifndef _DRM_OS_FREEBSD_H_ #define _DRM_OS_FREEBSD_H_ #include #include #if _BYTE_ORDER == _BIG_ENDIAN #define __BIG_ENDIAN 4321 #else #define __LITTLE_ENDIAN 1234 #endif #ifdef __LP64__ #define BITS_PER_LONG 64 #else #define BITS_PER_LONG 32 #endif #ifndef __user #define __user #endif #ifndef __iomem #define __iomem #endif #ifndef __always_unused #define __always_unused #endif #ifndef __must_check #define __must_check #endif #ifndef __force #define __force #endif #ifndef uninitialized_var #define uninitialized_var(x) x #endif #define cpu_to_le16(x) htole16(x) #define le16_to_cpu(x) le16toh(x) #define cpu_to_le32(x) htole32(x) #define le32_to_cpu(x) le32toh(x) #define cpu_to_be16(x) htobe16(x) #define be16_to_cpu(x) be16toh(x) #define cpu_to_be32(x) htobe32(x) #define be32_to_cpu(x) be32toh(x) #define be32_to_cpup(x) be32toh(*x) typedef vm_paddr_t dma_addr_t; typedef vm_paddr_t resource_size_t; #define wait_queue_head_t atomic_t typedef uint64_t u64; typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; typedef int64_t s64; typedef int32_t s32; typedef int16_t s16; typedef int8_t s8; typedef uint16_t __le16; typedef uint32_t __le32; typedef uint64_t __le64; typedef uint16_t __be16; typedef uint32_t __be32; typedef uint64_t __be64; #define DRM_IRQ_ARGS void *arg typedef void irqreturn_t; #define IRQ_HANDLED /* nothing */ #define IRQ_NONE /* nothing */ #define __init #define __exit #define BUILD_BUG_ON(x) CTASSERT(!(x)) #define BUILD_BUG_ON_NOT_POWER_OF_2(x) #ifndef WARN #define WARN(condition, format, ...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ DRM_ERROR(format, ##__VA_ARGS__); \ unlikely(__ret_warn_on); \ }) #endif #define WARN_ONCE(condition, format, ...) 
\ WARN(condition, format, ##__VA_ARGS__) #define WARN_ON(cond) WARN(cond, "WARN ON: " #cond) #define WARN_ON_SMP(cond) WARN_ON(cond) #define BUG() panic("BUG") #define BUG_ON(cond) KASSERT(!(cond), ("BUG ON: " #cond " -> 0x%jx", (uintmax_t)(cond))) #define unlikely(x) __builtin_expect(!!(x), 0) #define likely(x) __builtin_expect(!!(x), 1) #define container_of(ptr, type, member) ({ \ __typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) #define KHZ2PICOS(a) (1000000000UL/(a)) #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) #define HZ hz #define DRM_HZ hz #define DRM_CURRENTPID curthread->td_proc->p_pid #define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0) #define udelay(usecs) DELAY(usecs) #define mdelay(msecs) do { int loops = (msecs); \ while (loops--) DELAY(1000); \ } while (0) #define DRM_UDELAY(udelay) DELAY(udelay) #define drm_msleep(x, msg) pause((msg), ((int64_t)(x)) * hz / 1000) #define DRM_MSLEEP(msecs) drm_msleep((msecs), "drm_msleep") #define get_seconds() time_second #define ioread8(addr) *(volatile uint8_t *)((char *)addr) #define ioread16(addr) *(volatile uint16_t *)((char *)addr) #define ioread32(addr) *(volatile uint32_t *)((char *)addr) #define iowrite8(data, addr) *(volatile uint8_t *)((char *)addr) = data; #define iowrite16(data, addr) *(volatile uint16_t *)((char *)addr) = data; #define iowrite32(data, addr) *(volatile uint32_t *)((char *)addr) = data; #define DRM_READ8(map, offset) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) #define DRM_READ16(map, offset) \ le16toh(*(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_READ32(map, offset) \ le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_READ64(map, offset) \ le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_WRITE8(map, offset, val) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = val #define DRM_WRITE16(map, offset, val) \ *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole16(val) #define DRM_WRITE32(map, offset, val) \ *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole32(val) #define DRM_WRITE64(map, offset, val) \ *(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole64(val) /* DRM_READMEMORYBARRIER() prevents reordering of reads. * DRM_WRITEMEMORYBARRIER() prevents reordering of writes. * DRM_MEMORYBARRIER() prevents reordering of reads and writes. */ #define DRM_READMEMORYBARRIER() rmb() #define DRM_WRITEMEMORYBARRIER() wmb() #define DRM_MEMORYBARRIER() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() #define smp_mb__before_atomic_inc() mb() #define smp_mb__after_atomic_inc() mb() #define barrier() __compiler_membar() #define do_div(a, b) ((a) /= (b)) #define div64_u64(a, b) ((a) / (b)) #define lower_32_bits(n) ((u32)(n)) #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) #define __set_bit(n, s) set_bit((n), (s)) #define __clear_bit(n, s) clear_bit((n), (s)) #define min_t(type, x, y) ({ \ type __min1 = (x); \ type __min2 = (y); \ __min1 < __min2 ? __min1 : __min2; }) #define max_t(type, x, y) ({ \ type __max1 = (x); \ type __max2 = (y); \ __max1 > __max2 ? 
__max1 : __max2; }) #define memset_io(a, b, c) memset((a), (b), (c)) #define memcpy_fromio(a, b, c) memcpy((a), (b), (c)) #define memcpy_toio(a, b, c) memcpy((a), (b), (c)) #define VERIFY_READ VM_PROT_READ #define VERIFY_WRITE VM_PROT_WRITE #define access_ok(prot, p, l) useracc((p), (l), (prot)) /* XXXKIB what is the right code for the FreeBSD ? */ /* kib@ used ENXIO here -- dumbbell@ */ #define EREMOTEIO EIO #define ERESTARTSYS 512 /* Same value as Linux. */ #define KTR_DRM KTR_DEV #define KTR_DRM_REG KTR_SPARE3 #define DRM_AGP_KERN struct agp_info #define DRM_AGP_MEM void #define PCI_VENDOR_ID_APPLE 0x106b #define PCI_VENDOR_ID_ASUSTEK 0x1043 #define PCI_VENDOR_ID_ATI 0x1002 #define PCI_VENDOR_ID_DELL 0x1028 #define PCI_VENDOR_ID_HP 0x103c #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 #define PCI_VENDOR_ID_SONY 0x104d #define PCI_VENDOR_ID_VIA 0x1106 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define DIV_ROUND_CLOSEST(n,d) (((n) + (d) / 2) / (d)) #define div_u64(n, d) ((n) / (d)) #define hweight32(i) bitcount32(i) static inline unsigned long roundup_pow_of_two(unsigned long x) { return (1UL << flsl(x - 1)); } /** * ror32 - rotate a 32-bit value right * @word: value to rotate * @shift: bits to roll * * Source: include/linux/bitops.h */ static inline uint32_t ror32(uint32_t word, unsigned int shift) { return (word >> shift) | (word << (32 - shift)); } #define IS_ALIGNED(x, y) (((x) & ((y) - 1)) == 0) #define round_down(x, y) rounddown2((x), (y)) #define round_up(x, y) roundup2((x), (y)) #define get_unaligned(ptr) \ ({ __typeof__(*(ptr)) __tmp; \ memcpy(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) #if _BYTE_ORDER == _LITTLE_ENDIAN /* Taken from linux/include/linux/unaligned/le_struct.h. */ struct __una_u32 { u32 x; } __packed; static inline u32 __get_unaligned_cpu32(const void *p) { const struct __una_u32 *ptr = (const struct __una_u32 *)p; return (ptr->x); } static inline u32 get_unaligned_le32(const void *p) { return (__get_unaligned_cpu32((const u8 *)p)); } #else /* Taken from linux/include/linux/unaligned/le_byteshift.h. */ static inline u32 __get_unaligned_le32(const u8 *p) { return (p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24); } static inline u32 get_unaligned_le32(const void *p) { return (__get_unaligned_le32((const u8 *)p)); } #endif static inline unsigned long ilog2(unsigned long x) { return (flsl(x) - 1); } static inline int64_t abs64(int64_t x) { return (x < 0 ? -x : x); } int64_t timeval_to_ns(const struct timeval *tv); struct timeval ns_to_timeval(const int64_t nsec); #define PAGE_ALIGN(addr) round_page(addr) #define page_to_phys(x) VM_PAGE_TO_PHYS(x) #define offset_in_page(x) ((x) & PAGE_MASK) #define drm_get_device_from_kdev(_kdev) (((struct drm_minor *)(_kdev)->si_drv1)->dev) #define DRM_IOC_VOID IOC_VOID #define DRM_IOC_READ IOC_OUT #define DRM_IOC_WRITE IOC_IN #define DRM_IOC_READWRITE IOC_INOUT #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) static inline long __copy_to_user(void __user *to, const void *from, unsigned long n) { return (copyout(from, to, n) != 0 ? n : 0); } #define copy_to_user(to, from, n) __copy_to_user((to), (from), (n)) static inline int __put_user(size_t size, void *ptr, void *x) { size = copy_to_user(ptr, x, size); return (size ? -EFAULT : size); } #define put_user(x, ptr) __put_user(sizeof(*ptr), (ptr), &(x)) static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { return ((copyin(__DECONST(void *, from), to, n) != 0 ? 
n : 0)); } #define copy_from_user(to, from, n) __copy_from_user((to), (from), (n)) static inline int __get_user(size_t size, const void *ptr, void *x) { size = copy_from_user(x, ptr, size); return (size ? -EFAULT : size); } #define get_user(x, ptr) __get_user(sizeof(*ptr), (ptr), &(x)) static inline int __copy_to_user_inatomic(void __user *to, const void *from, unsigned n) { return (copyout_nofault(from, to, n) != 0 ? n : 0); } #define __copy_to_user_inatomic_nocache(to, from, n) \ __copy_to_user_inatomic((to), (from), (n)) static inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { /* * XXXKIB. Equivalent Linux function is implemented using * MOVNTI for aligned moves. For unaligned head and tail, * normal move is performed. As such, it is not incorrect, if * only somewhat slower, to use normal copyin. All uses * except shmem_pwrite_fast() have the destination mapped WC. */ return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0)); } #define __copy_from_user_inatomic_nocache(to, from, n) \ __copy_from_user_inatomic((to), (from), (n)) static inline int fault_in_multipages_readable(const char __user *uaddr, int size) { char c; int ret = 0; const char __user *end = uaddr + size - 1; if (unlikely(size == 0)) return ret; while (uaddr <= end) { ret = -copyin(uaddr, &c, 1); if (ret != 0) return -EFAULT; uaddr += PAGE_SIZE; } /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & ~PAGE_MASK) == ((unsigned long)end & ~PAGE_MASK)) { ret = -copyin(end, &c, 1); } return ret; } static inline int fault_in_multipages_writeable(char __user *uaddr, int size) { int ret = 0; char __user *end = uaddr + size - 1; if (unlikely(size == 0)) return ret; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ while (uaddr <= end) { ret = subyte(uaddr, 0); if (ret != 0) return -EFAULT; uaddr += PAGE_SIZE; } /* Check whether the range spilled into the next page. 
*/ if (((unsigned long)uaddr & ~PAGE_MASK) == ((unsigned long)end & ~PAGE_MASK)) ret = subyte(end, 0); return ret; } enum __drm_capabilities { CAP_SYS_ADMIN }; static inline bool capable(enum __drm_capabilities cap) { switch (cap) { case CAP_SYS_ADMIN: return DRM_SUSER(curthread); default: panic("%s: unhandled capability: %0x", __func__, cap); return (false); } } #define to_user_ptr(x) ((void *)(uintptr_t)(x)) #define sigemptyset(set) SIGEMPTYSET(set) #define sigaddset(set, sig) SIGADDSET(set, sig) #define DRM_LOCK(dev) sx_xlock(&(dev)->dev_struct_lock) #define DRM_UNLOCK(dev) sx_xunlock(&(dev)->dev_struct_lock) extern unsigned long drm_linux_timer_hz_mask; #define jiffies ticks #define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz) #define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000) #define timespec_to_jiffies(x) (((x)->tv_sec * 1000000 + (x)->tv_nsec) * hz / 1000000) #define time_after(a,b) ((long)(b) - (long)(a) < 0) #define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0) #define round_jiffies(j) ((unsigned long)(((j) + drm_linux_timer_hz_mask) & ~drm_linux_timer_hz_mask)) #define round_jiffies_up(j) round_jiffies(j) /* TODO */ #define round_jiffies_up_relative(j) round_jiffies_up(j) /* TODO */ #define getrawmonotonic(ts) getnanouptime(ts) #define wake_up(queue) wakeup_one((void *)queue) #define wake_up_interruptible(queue) wakeup_one((void *)queue) #define wake_up_all(queue) wakeup((void *)queue) #define wake_up_interruptible_all(queue) wakeup((void *)queue) struct completion { unsigned int done; struct mtx lock; }; #define INIT_COMPLETION(c) ((c).done = 0); static inline void init_completion(struct completion *c) { mtx_init(&c->lock, "drmcompl", NULL, MTX_DEF); c->done = 0; } static inline void free_completion(struct completion *c) { mtx_destroy(&c->lock); } static inline void complete_all(struct completion *c) { mtx_lock(&c->lock); c->done++; mtx_unlock(&c->lock); wakeup(c); } static inline long wait_for_completion_interruptible_timeout(struct completion *c, unsigned long timeout) { unsigned long start_jiffies, elapsed_jiffies; bool timeout_expired = false, awakened = false; long ret = timeout; start_jiffies = ticks; mtx_lock(&c->lock); while (c->done == 0 && !timeout_expired) { ret = -msleep(c, &c->lock, PCATCH, "drmwco", timeout); switch(ret) { case -EWOULDBLOCK: timeout_expired = true; ret = 0; break; case -EINTR: case -ERESTART: ret = -ERESTARTSYS; break; case 0: awakened = true; break; } } mtx_unlock(&c->lock); if (awakened) { elapsed_jiffies = ticks - start_jiffies; ret = timeout > elapsed_jiffies ? 
timeout - elapsed_jiffies : 1; } return (ret); } MALLOC_DECLARE(DRM_MEM_DMA); MALLOC_DECLARE(DRM_MEM_SAREA); MALLOC_DECLARE(DRM_MEM_DRIVER); MALLOC_DECLARE(DRM_MEM_MAGIC); MALLOC_DECLARE(DRM_MEM_MINOR); MALLOC_DECLARE(DRM_MEM_IOCTLS); MALLOC_DECLARE(DRM_MEM_MAPS); MALLOC_DECLARE(DRM_MEM_BUFS); MALLOC_DECLARE(DRM_MEM_SEGS); MALLOC_DECLARE(DRM_MEM_PAGES); MALLOC_DECLARE(DRM_MEM_FILES); MALLOC_DECLARE(DRM_MEM_QUEUES); MALLOC_DECLARE(DRM_MEM_CMDS); MALLOC_DECLARE(DRM_MEM_MAPPINGS); MALLOC_DECLARE(DRM_MEM_BUFLISTS); +MALLOC_DECLARE(DRM_MEM_AGPLISTS); MALLOC_DECLARE(DRM_MEM_CTXBITMAP); MALLOC_DECLARE(DRM_MEM_SGLISTS); MALLOC_DECLARE(DRM_MEM_MM); MALLOC_DECLARE(DRM_MEM_HASHTAB); MALLOC_DECLARE(DRM_MEM_KMS); MALLOC_DECLARE(DRM_MEM_VBLANK); #define simple_strtol(a, b, c) strtol((a), (b), (c)) typedef struct drm_pci_id_list { int vendor; int device; long driver_private; char *name; } drm_pci_id_list_t; #ifdef __i386__ #define CONFIG_X86 1 #endif #ifdef __amd64__ #define CONFIG_X86 1 #define CONFIG_X86_64 1 #endif #ifdef __ia64__ #define CONFIG_IA64 1 #endif #if defined(__i386__) || defined(__amd64__) #define CONFIG_ACPI #define CONFIG_DRM_I915_KMS #undef CONFIG_INTEL_IOMMU #endif #ifdef COMPAT_FREEBSD32 #define CONFIG_COMPAT #endif #ifndef __arm__ #define CONFIG_AGP 1 #define CONFIG_MTRR 1 #endif #define CONFIG_FB 1 extern const char *fb_mode_option; #undef CONFIG_DEBUG_FS #undef CONFIG_VGA_CONSOLE #define EXPORT_SYMBOL(x) #define EXPORT_SYMBOL_GPL(x) #define MODULE_AUTHOR(author) #define MODULE_DESCRIPTION(desc) #define MODULE_LICENSE(license) #define MODULE_PARM_DESC(name, desc) #define MODULE_DEVICE_TABLE(name, list) #define module_param_named(name, var, type, perm) #define printk printf #define pr_err DRM_ERROR #define pr_warn DRM_WARNING #define pr_warn_once DRM_WARNING #define KERN_DEBUG "" /* I2C compatibility. 
*/ #define I2C_M_RD IIC_M_RD #define I2C_M_WR IIC_M_WR #define I2C_M_NOSTART IIC_M_NOSTART struct fb_info * framebuffer_alloc(void); void framebuffer_release(struct fb_info *info); #define console_lock() #define console_unlock() #define console_trylock() true #define PM_EVENT_SUSPEND 0x0002 #define PM_EVENT_QUIESCE 0x0008 #define PM_EVENT_PRETHAW PM_EVENT_QUIESCE typedef struct pm_message { int event; } pm_message_t; static inline int pci_read_config_byte(device_t kdev, int where, u8 *val) { *val = (u8)pci_read_config(kdev, where, 1); return (0); } static inline int pci_write_config_byte(device_t kdev, int where, u8 val) { pci_write_config(kdev, where, val, 1); return (0); } static inline int pci_read_config_word(device_t kdev, int where, uint16_t *val) { *val = (uint16_t)pci_read_config(kdev, where, 2); return (0); } static inline int pci_write_config_word(device_t kdev, int where, uint16_t val) { pci_write_config(kdev, where, val, 2); return (0); } static inline int pci_read_config_dword(device_t kdev, int where, uint32_t *val) { *val = (uint32_t)pci_read_config(kdev, where, 4); return (0); } static inline int pci_write_config_dword(device_t kdev, int where, uint32_t val) { pci_write_config(kdev, where, val, 4); return (0); } static inline void on_each_cpu(void callback(void *data), void *data, int wait) { smp_rendezvous(NULL, callback, NULL, data); } void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii); #define KIB_NOTYET() \ do { \ if (drm_debug && drm_notyet) \ printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \ } while (0) #endif /* _DRM_OS_FREEBSD_H_ */ Index: head/sys/dev/drm2/drm_stub.c =================================================================== --- head/sys/dev/drm2/drm_stub.c (revision 312927) +++ head/sys/dev/drm2/drm_stub.c (revision 312928) @@ -1,502 +1,501 @@ /** * \file drm_stub.h * Stub support * * \author Rickard E. (Rik) Faith */ /* * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org * * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include -#include #ifdef DRM_DEBUG_DEFAULT_ON unsigned int drm_debug = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | DRM_DEBUGBITS_FAILED_IOCTL); #else unsigned int drm_debug = 0; /* 1 to enable debug output */ #endif EXPORT_SYMBOL(drm_debug); unsigned int drm_notyet = 0; unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ EXPORT_SYMBOL(drm_vblank_offdelay); unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ EXPORT_SYMBOL(drm_timestamp_precision); /* * Default to use monotonic timestamps for wait-for-vblank and page-flip * complete events. */ unsigned int drm_timestamp_monotonic = 1; MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); module_param_named(debug, drm_debug, int, 0600); module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); static struct cdevsw drm_cdevsw = { .d_version = D_VERSION, .d_open = drm_open, .d_read = drm_read, .d_ioctl = drm_ioctl, .d_poll = drm_poll, .d_mmap_single = drm_mmap_single, .d_name = "drm", .d_flags = D_TRACKCLOSE }; static int drm_minor_get_id(struct drm_device *dev, int type) { int new_id; new_id = device_get_unit(dev->dev); if (new_id >= 64) return -EINVAL; if (type == DRM_MINOR_CONTROL) { new_id += 64; } else if (type == DRM_MINOR_RENDER) { new_id += 128; } return new_id; } struct drm_master *drm_master_create(struct drm_minor *minor) { struct drm_master *master; master = malloc(sizeof(*master), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!master) return NULL; refcount_init(&master->refcount, 1); mtx_init(&master->lock.spinlock, "drm_master__lock__spinlock", NULL, MTX_DEF); DRM_INIT_WAITQUEUE(&master->lock.lock_queue); drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER); INIT_LIST_HEAD(&master->magicfree); master->minor = minor; list_add_tail(&master->head, &minor->master_list); return master; } struct drm_master *drm_master_get(struct drm_master *master) { refcount_acquire(&master->refcount); return master; } EXPORT_SYMBOL(drm_master_get); static void drm_master_destroy(struct drm_master *master) { struct drm_magic_entry *pt, *next; struct drm_device *dev = master->minor->dev; struct drm_map_list *r_list, *list_temp; list_del(&master->head); if (dev->driver->master_destroy) dev->driver->master_destroy(dev, master); list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { if (r_list->master == master) { drm_rmmap_locked(dev, r_list->map); r_list = NULL; } } if (master->unique) { free(master->unique, DRM_MEM_DRIVER); master->unique = NULL; master->unique_len = 0; } list_for_each_entry_safe(pt, next, &master->magicfree, head) { list_del(&pt->head); drm_ht_remove_item(&master->magiclist, &pt->hash_item); free(pt, DRM_MEM_MAGIC); } drm_ht_remove(&master->magiclist); free(master, DRM_MEM_KMS); } void drm_master_put(struct drm_master **master) { if (refcount_release(&(*master)->refcount)) drm_master_destroy(*master); *master = NULL; } EXPORT_SYMBOL(drm_master_put); int drm_setmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; if 
(file_priv->is_master) return 0; if (file_priv->minor->master && file_priv->minor->master != file_priv->master) return -EINVAL; if (!file_priv->master) return -EINVAL; if (file_priv->minor->master) return -EINVAL; DRM_LOCK(dev); file_priv->minor->master = drm_master_get(file_priv->master); file_priv->is_master = 1; if (dev->driver->master_set) { ret = dev->driver->master_set(dev, file_priv, false); if (unlikely(ret != 0)) { file_priv->is_master = 0; drm_master_put(&file_priv->minor->master); } } DRM_UNLOCK(dev); return 0; } int drm_dropmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { if (!file_priv->is_master) return -EINVAL; if (!file_priv->minor->master) return -EINVAL; DRM_LOCK(dev); if (dev->driver->master_drop) dev->driver->master_drop(dev, file_priv, false); drm_master_put(&file_priv->minor->master); file_priv->is_master = 0; DRM_UNLOCK(dev); return 0; } int drm_fill_in_dev(struct drm_device *dev, struct drm_driver *driver) { int retcode, i; INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->ctxlist); INIT_LIST_HEAD(&dev->maplist); INIT_LIST_HEAD(&dev->vblank_event_list); mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF); mtx_init(&dev->count_lock, "drmcount", NULL, MTX_DEF); mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF); sx_init(&dev->dev_struct_lock, "drmslk"); mtx_init(&dev->ctxlist_mutex, "drmctxlist", NULL, MTX_DEF); mtx_init(&dev->pcir_lock, "drmpcir", NULL, MTX_DEF); if (drm_ht_create(&dev->map_hash, 12)) { return -ENOMEM; } /* the DRM has 6 basic counters */ dev->counters = 6; dev->types[0] = _DRM_STAT_LOCK; dev->types[1] = _DRM_STAT_OPENS; dev->types[2] = _DRM_STAT_CLOSES; dev->types[3] = _DRM_STAT_IOCTLS; dev->types[4] = _DRM_STAT_LOCKS; dev->types[5] = _DRM_STAT_UNLOCKS; /* * FIXME Linux<->FreeBSD: this is done in drm_setup() on Linux. */ for (i = 0; i < ARRAY_SIZE(dev->counts); i++) atomic_set(&dev->counts[i], 0); dev->driver = driver; retcode = drm_pci_agp_init(dev); if (retcode) goto error_out_unreg; retcode = drm_ctxbitmap_init(dev); if (retcode) { DRM_ERROR("Cannot allocate memory for context bitmap.\n"); goto error_out_unreg; } if (driver->driver_features & DRIVER_GEM) { retcode = drm_gem_init(dev); if (retcode) { DRM_ERROR("Cannot initialize graphics execution " "manager (GEM)\n"); goto error_out_unreg; } } retcode = drm_sysctl_init(dev); if (retcode != 0) { DRM_ERROR("Failed to create hw.dri sysctl entry: %d\n", retcode); } return 0; error_out_unreg: drm_cancel_fill_in_dev(dev); return retcode; } EXPORT_SYMBOL(drm_fill_in_dev); void drm_cancel_fill_in_dev(struct drm_device *dev) { struct drm_driver *driver; driver = dev->driver; drm_sysctl_cleanup(dev); if (driver->driver_features & DRIVER_GEM) drm_gem_destroy(dev); drm_ctxbitmap_cleanup(dev); if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp && dev->agp->agp_mtrr >= 0) { int retval; retval = drm_mtrr_del(dev->agp->agp_mtrr, dev->agp->agp_info.ai_aperture_base, dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC); DRM_DEBUG("mtrr_del=%d\n", retval); } - kfree(dev->agp); + free(dev->agp, DRM_MEM_AGPLISTS); dev->agp = NULL; drm_ht_remove(&dev->map_hash); mtx_destroy(&dev->irq_lock); mtx_destroy(&dev->count_lock); mtx_destroy(&dev->event_lock); sx_destroy(&dev->dev_struct_lock); mtx_destroy(&dev->ctxlist_mutex); mtx_destroy(&dev->pcir_lock); } /** * Get a secondary minor number. * * \param dev device data structure * \param sec-minor structure to hold the assigned minor * \return negative number on failure. 
* * Search an empty entry and initialize it to the given parameters, and * create the proc init entry via proc_init(). This routines assigns * minor numbers to secondary heads of multi-headed cards */ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) { struct drm_minor *new_minor; int ret; int minor_id; const char *minor_devname; DRM_DEBUG("\n"); minor_id = drm_minor_get_id(dev, type); if (minor_id < 0) return minor_id; new_minor = malloc(sizeof(struct drm_minor), DRM_MEM_MINOR, M_NOWAIT | M_ZERO); if (!new_minor) { ret = -ENOMEM; goto err_idr; } new_minor->type = type; new_minor->dev = dev; new_minor->index = minor_id; INIT_LIST_HEAD(&new_minor->master_list); new_minor->buf_sigio = NULL; switch (type) { case DRM_MINOR_CONTROL: minor_devname = "dri/controlD%d"; break; case DRM_MINOR_RENDER: minor_devname = "dri/renderD%d"; break; default: minor_devname = "dri/card%d"; break; } ret = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &new_minor->device, &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID, DRM_DEV_MODE, minor_devname, minor_id); if (ret) { DRM_ERROR("Failed to create cdev: %d\n", ret); goto err_mem; } new_minor->device->si_drv1 = new_minor; *minor = new_minor; DRM_DEBUG("new minor assigned %d\n", minor_id); return 0; err_mem: free(new_minor, DRM_MEM_MINOR); err_idr: *minor = NULL; return ret; } EXPORT_SYMBOL(drm_get_minor); /** * Put a secondary minor number. * * \param sec_minor - structure to be released * \return always zero * * Cleans up the proc resources. Not legal for this to be the * last minor released. * */ int drm_put_minor(struct drm_minor **minor_p) { struct drm_minor *minor = *minor_p; DRM_DEBUG("release secondary minor %d\n", minor->index); funsetown(&minor->buf_sigio); destroy_dev(minor->device); free(minor, DRM_MEM_MINOR); *minor_p = NULL; return 0; } EXPORT_SYMBOL(drm_put_minor); /** * Called via drm_exit() at module unload time or when pci device is * unplugged. * * Cleans up all DRM device, calling drm_lastclose(). 
* */ void drm_put_dev(struct drm_device *dev) { struct drm_driver *driver; struct drm_map_list *r_list, *list_temp; DRM_DEBUG("\n"); if (!dev) { DRM_ERROR("cleanup called no dev\n"); return; } driver = dev->driver; drm_lastclose(dev); if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp && dev->agp->agp_mtrr >= 0) { int retval; retval = drm_mtrr_del(dev->agp->agp_mtrr, dev->agp->agp_info.ai_aperture_base, dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC); DRM_DEBUG("mtrr_del=%d\n", retval); } if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_mode_group_free(&dev->primary->mode_group); if (dev->driver->unload) dev->driver->unload(dev); drm_sysctl_cleanup(dev); if (drm_core_has_AGP(dev) && dev->agp) { - kfree(dev->agp); + free(dev->agp, DRM_MEM_AGPLISTS); dev->agp = NULL; } drm_vblank_cleanup(dev); list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) drm_rmmap(dev, r_list->map); drm_ht_remove(&dev->map_hash); drm_ctxbitmap_cleanup(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_put_minor(&dev->control); if (driver->driver_features & DRIVER_GEM) drm_gem_destroy(dev); drm_put_minor(&dev->primary); mtx_destroy(&dev->irq_lock); mtx_destroy(&dev->count_lock); mtx_destroy(&dev->event_lock); sx_destroy(&dev->dev_struct_lock); mtx_destroy(&dev->ctxlist_mutex); mtx_destroy(&dev->pcir_lock); #ifdef FREEBSD_NOTYET list_del(&dev->driver_item); #endif /* FREEBSD_NOTYET */ } EXPORT_SYMBOL(drm_put_dev); Index: head/sys/modules/drm2/drm2/Makefile =================================================================== --- head/sys/modules/drm2/drm2/Makefile (revision 312927) +++ head/sys/modules/drm2/drm2/Makefile (revision 312928) @@ -1,65 +1,63 @@ # $FreeBSD$ .PATH: ${.CURDIR}/../../../dev/drm2 ${.CURDIR}/../../../dev/drm2/ttm KMOD = drm2 SRCS = \ drm_agpsupport.c \ drm_auth.c \ drm_bufs.c \ drm_buffer.c \ drm_context.c \ drm_crtc.c \ drm_crtc_helper.c \ drm_dma.c \ drm_dp_helper.c \ drm_dp_iic_helper.c \ drm_drv.c \ drm_edid.c \ drm_fb_helper.c \ drm_fops.c \ drm_gem.c \ drm_gem_names.c \ drm_global.c \ drm_hashtab.c \ drm_ioctl.c \ drm_irq.c \ drm_linux_list_sort.c \ drm_lock.c \ drm_memory.c \ drm_mm.c \ drm_modes.c \ drm_pci.c \ drm_scatter.c \ drm_stub.c \ drm_sysctl.c \ drm_vm.c \ drm_os_freebsd.c \ ttm_agp_backend.c \ ttm_lock.c \ ttm_object.c \ ttm_tt.c \ ttm_bo_util.c \ ttm_bo.c \ ttm_bo_manager.c \ ttm_execbuf_util.c \ ttm_memory.c \ ttm_page_alloc.c \ ttm_bo_vm.c \ ati_pcigart.c #ttm_page_alloc_dma.c -CFLAGS+= -I${.CURDIR}/../../../compat/linuxkpi/common/include - .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_ARCH} == "powerpc64" SRCS += drm_ioc32.c .endif SRCS +=device_if.h bus_if.h pci_if.h device_if.h iicbus_if.h opt_drm.h \ opt_vm.h opt_compat.h opt_syscons.h .if ${MACHINE_CPUARCH} == "powerpc" CWARNFLAGS+=-Wno-cast-qual .endif .include
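The pattern this revision applies throughout drm2 — replacing linuxkpi's kzalloc()/kfree() with FreeBSD's native malloc(9)/free(9) keyed by a dedicated malloc type (here DRM_MEM_AGPLISTS, declared above in drm_os_freebsd.h), and dropping the linuxkpi include path from the module Makefile — is roughly the one sketched below. This is a minimal illustration, not code from the tree: the MALLOC_DEFINE() placement and its description strings, plus the example_entry structure and helpers, are assumptions for demonstration; the real definition of DRM_MEM_AGPLISTS lives in a drm2 source file outside these hunks.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

/*
 * Hypothetical definition for illustration; headers such as
 * drm_os_freebsd.h carry only the MALLOC_DECLARE(), and exactly one
 * source file in the tree provides the matching MALLOC_DEFINE().
 */
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGP memory list entries");

/* Hypothetical record type standing in for struct drm_agp_mem. */
struct example_entry {
	unsigned long	handle;
	unsigned long	pages;
};

static struct example_entry *
example_alloc(void)
{
	struct example_entry *entry;

	/*
	 * M_NOWAIT | M_ZERO gives a non-sleeping, zero-filled allocation,
	 * matching the style used elsewhere in drm2 (e.g. drm_master_create())
	 * and standing in for the kzalloc() calls this revision removes.
	 */
	entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT | M_ZERO);
	return (entry);
}

static void
example_free(struct example_entry *entry)
{
	/* free(9) must be passed the same malloc type used to allocate. */
	free(entry, DRM_MEM_AGPLISTS);
}

Because every allocation is tagged with a malloc type, vmstat -m can account for the memory under the type's short description, which is one reason the conversion introduces DRM_MEM_AGPLISTS rather than reusing a generic type.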