diff --git a/share/man/man9/bus_activate_resource.9 b/share/man/man9/bus_activate_resource.9
index be878349ab3c..7b87197b8d11 100644
--- a/share/man/man9/bus_activate_resource.9
+++ b/share/man/man9/bus_activate_resource.9
@@ -1,144 +1,126 @@
.\" -*- nroff -*-
.\"
.\" Copyright (c) 2003 M. Warner Losh
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\"    notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\"    notice, this list of conditions and the following disclaimer in the
.\"    documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR
.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
.\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT,
.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.Dd May 20, 2016
+.Dd March 13, 2024
.Dt BUS_ACTIVATE_RESOURCE 9
.Os
.Sh NAME
.Nm bus_activate_resource , bus_deactivate_resource
.Nd activate or deactivate a resource
.Sh SYNOPSIS
.In sys/param.h
.In sys/bus.h
.Pp
.In machine/bus.h
.In sys/rman.h
.In machine/resource.h
.Ft int
.Fo bus_activate_resource
-.Fa "device_t dev" "int type" "int rid" "struct resource *r"
+.Fa "device_t dev" "struct resource *r"
.Fc
.Ft int
.Fo bus_deactivate_resource
-.Fa "device_t dev" "int type" "int rid" "struct resource *r"
+.Fa "device_t dev" "struct resource *r"
.Fc
.Sh DESCRIPTION
These functions activate or deactivate a previously allocated resource.
In general, resources must be activated before they can be accessed by
the driver.
Bus drivers may perform additional actions to ensure that the resource
is ready to be accessed.
For example, the PCI bus driver enables memory decoding in a PCI
device's command register when activating a memory resource.
.Pp
The arguments are as follows:
.Bl -tag -width indent
.It Fa dev
The device that requests ownership of the resource.
Before allocation, the resource is owned by the parent bus.
-.It Fa type
-The type of resource you want to allocate.
-It is one of:
-.Pp
-.Bl -tag -width ".Dv SYS_RES_MEMORY" -compact
-.It Dv PCI_RES_BUS
-for PCI bus numbers
-.It Dv SYS_RES_IRQ
-for IRQs
-.It Dv SYS_RES_DRQ
-for ISA DMA lines
-.It Dv SYS_RES_IOPORT
-for I/O ports
-.It Dv SYS_RES_MEMORY
-for I/O memory
-.El
-.It Fa rid
-A pointer to a bus specific handle that identifies the resource being allocated.
.It Fa r
A pointer to the
.Vt "struct resource"
returned by
.Xr bus_alloc_resource 9 .
.El
.Ss Resource Mapping
Resources which can be mapped for CPU access by a
.Xr bus_space 9
tag and handle will create a mapping of the entire resource when
activated.
The tag and handle for this mapping are stored in
.Fa r
and can be retrieved via
.Xr rman_get_bustag 9
and
.Xr rman_get_bushandle 9 .
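For illustration, a minimal sketch of the new two-argument calling
convention introduced by this patch (not part of the patch itself; the
rid value of 0, the dev variable, and the error handling are
assumptions):

	struct resource *res;
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	int error, rid;

	rid = 0;	/* hypothetical register window */
	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
	if (res == NULL)
		return (ENXIO);
	error = bus_activate_resource(dev, res);	/* no type/rid */
	if (error != 0)
		return (error);	/* release of res elided in this sketch */
	bst = rman_get_bustag(res);
	bsh = rman_get_bushandle(res);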
These can be used with the
.Xr bus_space 9
API to access device registers or memory described by
.Fa r .
If the mapping is associated with a virtual address, the virtual
address can be retrieved via
.Xr rman_get_virtual 9 .
.Pp
This implicit mapping can be disabled by passing the
.Dv RF_UNMAPPED
flag to
.Xr bus_alloc_resource 9 .
A driver may use this if it wishes to allocate its own mappings of a
resource using
.Xr bus_map_resource 9 .
.Pp
A wrapper API for
.Xr bus_space 9
is also provided that accepts the associated resource as the first
argument in place of the
.Xr bus_space 9
tag and handle.
The functions in this wrapper API are named similarly to the
.Xr bus_space 9
API except that
.Dq _space
is removed from their name.
For example,
.Fn bus_read_4
can be used in place of
.Fn bus_space_read_4 .
The wrapper API is preferred in new drivers.
.Pp
These two statements both read a 32-bit register at the start of a
resource:
.Bd -literal
bus_space_read_4(rman_get_bustag(res), rman_get_bushandle(res), 0);
bus_read_4(res, 0);
.Ed
.Sh RETURN VALUES
Zero is returned on success, otherwise an error is returned.
.Sh SEE ALSO
.Xr bus_alloc_resource 9 ,
.Xr bus_map_resource 9 ,
.Xr bus_space 9 ,
.Xr device 9 ,
.Xr driver 9
.Sh AUTHORS
This manual page was written by
.An Warner Losh Aq Mt imp@FreeBSD.org .
diff --git a/sys/arm/arm/nexus.c b/sys/arm/arm/nexus.c
index 0cb896ce346e..8274a792839d 100644
--- a/sys/arm/arm/nexus.c
+++ b/sys/arm/arm/nexus.c
@@ -1,435 +1,431 @@
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission. M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose. It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * This code implements a `root nexus' for Arm Architecture
 * machines. The function of the root nexus is to serve as an
 * attachment point for both processors and buses, and to manage
 * resources which are common to all of them. In particular,
 * this code implements the core resource managers for interrupt
 * requests and I/O memory address space.
*/ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include "ofw_bus_if.h" #endif static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static struct rman irq_rman; static device_probe_t nexus_probe; static device_attach_t nexus_attach; static bus_add_child_t nexus_add_child; static bus_print_child_t nexus_print_child; static bus_activate_resource_t nexus_activate_resource; static bus_deactivate_resource_t nexus_deactivate_resource; static bus_get_rman_t nexus_get_rman; static bus_map_resource_t nexus_map_resource; static bus_unmap_resource_t nexus_unmap_resource; #ifdef SMP static bus_bind_intr_t nexus_bind_intr; #endif static bus_config_intr_t nexus_config_intr; static bus_describe_intr_t nexus_describe_intr; static bus_setup_intr_t nexus_setup_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_get_bus_tag_t nexus_get_bus_tag; static bus_get_dma_tag_t nexus_get_dma_tag; #ifdef FDT static ofw_bus_map_intr_t nexus_ofw_map_intr; #endif /* * Normally NULL (which results in defaults which are handled in * busdma_machdep), platform init code can use nexus_set_dma_tag() to set this * to a tag that will be inherited by all busses and devices on the platform. */ static bus_dma_tag_t nexus_dma_tag; static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), /* Bus interface */ DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rman_alloc_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_get_rman, nexus_get_rman), DEVMETHOD(bus_map_resource, nexus_map_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_unmap_resource, nexus_unmap_resource), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), DEVMETHOD(bus_get_dma_tag, nexus_get_dma_tag), #ifdef FDT /* ofw_bus interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), #endif DEVMETHOD_END }; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; EARLY_DRIVER_MODULE(nexus, root, nexus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_EARLY); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_DEFAULT); } static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_probe mem_rman"); irq_rman.rm_start = 0; irq_rman.rm_end = ~0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0)) panic("nexus_attach irq_rman"); /* First, add ofwbus0. 
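 * Devices described by the FDT will attach beneath it.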
*/ device_add_child(dev, "ofwbus", 0); /* * Next, deal with the children we know about already. */ bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); device_set_ivars(child, ndev); return (child); } static struct rman * nexus_get_rman(device_t bus, int type, u_int flags) { switch (type) { case SYS_RES_IRQ: return (&irq_rman); case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&mem_rman); default: return (NULL); } } static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { #ifdef FDT return (fdtbus_bs_tag); #else return ((void *)1); #endif } static bus_dma_tag_t nexus_get_dma_tag(device_t dev, device_t child) { return (nexus_dma_tag); } void nexus_set_dma_tag(bus_dma_tag_t tag) { nexus_dma_tag = tag; } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { int ret = ENODEV; device_printf(dev, "bus_config_intr is obsolete and not supported!\n"); ret = EOPNOTSUPP; return (ret); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; return (intr_setup_irq(child, res, filt, intr, arg, flags, cookiep)); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_teardown_irq(child, r, ih)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe_irq(child, irq, cookie, descr)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind_irq(child, irq, cpu)); } #endif static int -nexus_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +nexus_activate_resource(device_t bus, device_t child, struct resource *r) { int err; - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_activate_resource(bus, child, type, - rid, r)); + return (bus_generic_rman_activate_resource(bus, child, r)); case SYS_RES_IRQ: err = rman_activate_resource(r); if (err != 0) return (err); err = intr_activate_irq(child, r); if (err != 0) { rman_deactivate_resource(r); return (err); } return (0); default: return (EINVAL); } } static int nexus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. 
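 * A resource is active once bus_activate_resource(9) has succeeded or
 * when it was allocated with RF_ACTIVE; anything else is rejected with
 * ENXIO below. A typical caller that maps manually might look like
 * this (a sketch; rid, req and map are caller-provided):
 *
 *	r = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE | RF_UNMAPPED);
 *	error = bus_map_resource(dev, r, &req, &map);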
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); #ifdef FDT error = bus_space_map(fdtbus_bs_tag, start, length, 0, &map->r_bushandle); if (error) return (error); map->r_bustag = fdtbus_bs_tag; map->r_vaddr = (void *)map->r_bushandle; #else map->r_vaddr = pmap_mapdev(start, length); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = (void *)1; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; #endif map->r_size = length; return (0); } static int nexus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: #ifdef FDT bus_space_unmap(map->r_bustag, map->r_bushandle, map->r_size); #else pmap_unmapdev(map->r_vaddr, map->r_size); #endif return (0); default: return (EINVAL); } } static int -nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +nexus_deactivate_resource(device_t bus, device_t child, struct resource *r) { int error; - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_deactivate_resource(bus, child, type, - rid, r)); + return (bus_generic_rman_deactivate_resource(bus, child, r)); case SYS_RES_IRQ: error = rman_deactivate_resource(r); if (error) return (error); intr_deactivate_irq(child, r); return (0); default: return (EINVAL); } } #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { u_int irq; struct intr_map_data_fdt *fdt_data; size_t len; len = sizeof(*fdt_data) + icells * sizeof(pcell_t); fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data( INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO); fdt_data->iparent = iparent; fdt_data->ncells = icells; memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t)); irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data); return (irq); } #endif /* FDT */ diff --git a/sys/arm/mv/mv_pci.c b/sys/arm/mv/mv_pci.c index a8a6d1d50002..a24a71cd4ecf 100644 --- a/sys/arm/mv/mv_pci.c +++ b/sys/arm/mv/mv_pci.c @@ -1,1412 +1,1403 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2010 The FreeBSD Foundation * Copyright (c) 2010-2015 Semihalf * All rights reserved. * * Developed by Semihalf. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Marvell integrated PCI/PCI-Express controller driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "pcib_if.h" #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif /* * Code and data related to fdt-based PCI configuration. * * This stuff used to be in dev/fdt/fdt_pci.c and fdt_common.h, but it was * always Marvell-specific so that was deleted and the code now lives here. */ struct mv_pci_range { u_long base_pci; u_long base_parent; u_long len; }; #define FDT_RANGES_CELLS ((3 + 3 + 2) * 2) #define PCI_SPACE_LEN 0x00400000 static void mv_pci_range_dump(struct mv_pci_range *range) { #ifdef DEBUG printf("\n"); printf(" base_pci = 0x%08lx\n", range->base_pci); printf(" base_par = 0x%08lx\n", range->base_parent); printf(" len = 0x%08lx\n", range->len); #endif } static int mv_pci_ranges_decode(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { pcell_t ranges[FDT_RANGES_CELLS]; struct mv_pci_range *pci_space; pcell_t addr_cells, size_cells, par_addr_cells; pcell_t *rangesptr; pcell_t cell0, cell2; int tuple_size, tuples, i, rv, offset_cells, len; int portid, is_io_space; /* * Retrieve 'ranges' property. */ if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (EINVAL); if (addr_cells != 3 || size_cells != 2) return (ERANGE); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 3) return (ERANGE); len = OF_getproplen(node, "ranges"); if (len > sizeof(ranges)) return (ENOMEM); if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) return (EINVAL); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); tuples = len / tuple_size; /* * Initialize the ranges so that we don't have to worry about * having them all defined in the FDT. In particular, it is * perfectly fine not to want I/O space on PCI buses. */ bzero(io_space, sizeof(*io_space)); bzero(mem_space, sizeof(*mem_space)); rangesptr = &ranges[0]; offset_cells = 0; for (i = 0; i < tuples; i++) { cell0 = fdt_data_get((void *)rangesptr, 1); rangesptr++; /* cell1 */ rangesptr++; cell2 = fdt_data_get((void *)rangesptr, 1); rangesptr++; portid = fdt_data_get((void *)(rangesptr+1), 1); if (cell0 & 0x02000000) { pci_space = mem_space; is_io_space = 0; } else if (cell0 & 0x01000000) { pci_space = io_space; is_io_space = 1; } else { rv = ERANGE; goto out; } if (par_addr_cells == 3) { /* * This is a PCI subnode 'ranges'. Skip cell0 and * cell1 of this entry and only use cell2. 
*/ offset_cells = 2; rangesptr += offset_cells; } if ((par_addr_cells - offset_cells) > 2) { rv = ERANGE; goto out; } pci_space->base_parent = fdt_data_get((void *)rangesptr, par_addr_cells - offset_cells); rangesptr += par_addr_cells - offset_cells; if (size_cells > 2) { rv = ERANGE; goto out; } pci_space->len = fdt_data_get((void *)rangesptr, size_cells); rangesptr += size_cells; pci_space->base_pci = cell2; if (pci_space->len == 0) { pci_space->len = PCI_SPACE_LEN; pci_space->base_parent = fdt_immr_va + PCI_SPACE_LEN * ( 2 * portid + is_io_space); } } rv = 0; out: return (rv); } static int mv_pci_ranges(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { int err; debugf("Processing PCI node: %x\n", node); if ((err = mv_pci_ranges_decode(node, io_space, mem_space)) != 0) { debugf("could not decode parent PCI node 'ranges'\n"); return (err); } debugf("Post fixup dump:\n"); mv_pci_range_dump(io_space); mv_pci_range_dump(mem_space); return (0); } int mv_pci_devmap(phandle_t node, struct devmap_entry *devmap, vm_offset_t io_va, vm_offset_t mem_va) { struct mv_pci_range io_space, mem_space; int error; if ((error = mv_pci_ranges_decode(node, &io_space, &mem_space)) != 0) return (error); devmap->pd_va = (io_va ? io_va : io_space.base_parent); devmap->pd_pa = io_space.base_parent; devmap->pd_size = io_space.len; devmap++; devmap->pd_va = (mem_va ? mem_va : mem_space.base_parent); devmap->pd_pa = mem_space.base_parent; devmap->pd_size = mem_space.len; return (0); } /* * Code and data related to the Marvell pcib driver. */ #define PCI_CFG_ENA (1U << 31) #define PCI_CFG_BUS(bus) (((bus) & 0xff) << 16) #define PCI_CFG_DEV(dev) (((dev) & 0x1f) << 11) #define PCI_CFG_FUN(fun) (((fun) & 0x7) << 8) #define PCI_CFG_PCIE_REG(reg) ((reg) & 0xfc) #define PCI_REG_CFG_ADDR 0x0C78 #define PCI_REG_CFG_DATA 0x0C7C #define PCIE_REG_CFG_ADDR 0x18F8 #define PCIE_REG_CFG_DATA 0x18FC #define PCIE_REG_CONTROL 0x1A00 #define PCIE_CTRL_LINK1X 0x00000001 #define PCIE_REG_STATUS 0x1A04 #define PCIE_REG_IRQ_MASK 0x1910 #define PCIE_CONTROL_ROOT_CMPLX (1 << 1) #define PCIE_CONTROL_HOT_RESET (1 << 24) #define PCIE_LINK_TIMEOUT 1000000 #define PCIE_STATUS_LINK_DOWN 1 #define PCIE_STATUS_DEV_OFFS 16 /* Minimum PCI Memory and I/O allocations taken from PCI spec (in bytes) */ #define PCI_MIN_IO_ALLOC 4 #define PCI_MIN_MEM_ALLOC 16 #define BITS_PER_UINT32 (NBBY * sizeof(uint32_t)) struct mv_pcib_softc { device_t sc_dev; struct rman sc_mem_rman; bus_addr_t sc_mem_base; bus_addr_t sc_mem_size; uint32_t sc_mem_map[MV_PCI_MEM_SLICE_SIZE / (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)]; int sc_win_target; int sc_mem_win_attr; struct rman sc_io_rman; bus_addr_t sc_io_base; bus_addr_t sc_io_size; uint32_t sc_io_map[MV_PCI_IO_SLICE_SIZE / (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)]; int sc_io_win_attr; struct resource *sc_res; bus_space_handle_t sc_bsh; bus_space_tag_t sc_bst; int sc_rid; struct mtx sc_msi_mtx; uint32_t sc_msi_bitmap; int sc_busnr; /* Host bridge bus number */ int sc_devnr; /* Host bridge device number */ int sc_type; int sc_mode; /* Endpoint / Root Complex */ int sc_msi_supported; int sc_skip_enable_procedure; int sc_enable_find_root_slot; struct ofw_bus_iinfo sc_pci_iinfo; int ap_segment; /* PCI domain */ }; /* Local forward prototypes */ static int mv_pcib_decode_win(phandle_t, struct mv_pcib_softc *); static void mv_pcib_hw_cfginit(void); static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, int); static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *, u_int, u_int, 
u_int, u_int, uint32_t, int); static int mv_pcib_init(struct mv_pcib_softc *, int, int); static int mv_pcib_init_all_bars(struct mv_pcib_softc *, int, int, int, int); static void mv_pcib_init_bridge(struct mv_pcib_softc *, int, int, int); static inline void pcib_write_irq_mask(struct mv_pcib_softc *, uint32_t); static void mv_pcib_enable(struct mv_pcib_softc *, uint32_t); static int mv_pcib_mem_init(struct mv_pcib_softc *); /* Forward prototypes */ static int mv_pcib_probe(device_t); static int mv_pcib_attach(device_t); static struct rman *mv_pcib_get_rman(device_t, int, u_int); static struct resource *mv_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int mv_pcib_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int mv_pcib_release_resource(device_t, device_t, int, int, struct resource *); -static int mv_pcib_activate_resource(device_t, device_t, int, int, - struct resource *r); -static int mv_pcib_deactivate_resource(device_t, device_t, int, int, - struct resource *r); +static int mv_pcib_activate_resource(device_t, device_t, struct resource *); +static int mv_pcib_deactivate_resource(device_t, device_t, struct resource *); static int mv_pcib_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int mv_pcib_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static int mv_pcib_read_ivar(device_t, device_t, int, uintptr_t *); static int mv_pcib_write_ivar(device_t, device_t, int, uintptr_t); static int mv_pcib_maxslots(device_t); static uint32_t mv_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int); static void mv_pcib_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_route_interrupt(device_t, device_t, int); static int mv_pcib_alloc_msi(device_t, device_t, int, int, int *); static int mv_pcib_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int mv_pcib_release_msi(device_t, device_t, int, int *); /* * Bus interface definitions. 
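 * The device, bus, pcib and ofw_bus methods exported by the Marvell
 * host bridge driver.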
*/ static device_method_t mv_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mv_pcib_probe), DEVMETHOD(device_attach, mv_pcib_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, mv_pcib_read_ivar), DEVMETHOD(bus_write_ivar, mv_pcib_write_ivar), DEVMETHOD(bus_get_rman, mv_pcib_get_rman), DEVMETHOD(bus_alloc_resource, mv_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, mv_pcib_adjust_resource), DEVMETHOD(bus_release_resource, mv_pcib_release_resource), DEVMETHOD(bus_activate_resource, mv_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, mv_pcib_deactivate_resource), DEVMETHOD(bus_map_resource, mv_pcib_map_resource), DEVMETHOD(bus_unmap_resource, mv_pcib_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, mv_pcib_maxslots), DEVMETHOD(pcib_read_config, mv_pcib_read_config), DEVMETHOD(pcib_write_config, mv_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mv_pcib_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD(pcib_alloc_msi, mv_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, mv_pcib_release_msi), DEVMETHOD(pcib_map_msi, mv_pcib_map_msi), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t mv_pcib_driver = { "pcib", mv_pcib_methods, sizeof(struct mv_pcib_softc), }; DRIVER_MODULE(mv_pcib, ofwbus, mv_pcib_driver, 0, 0); DRIVER_MODULE(mv_pcib, pcib_ctrl, mv_pcib_driver, 0, 0); static struct mtx pcicfg_mtx; static int mv_pcib_probe(device_t self) { phandle_t node; node = ofw_bus_get_node(self); if (!mv_fdt_is_type(node, "pci")) return (ENXIO); if (!(ofw_bus_is_compatible(self, "mrvl,pcie") || ofw_bus_is_compatible(self, "mrvl,pci") || ofw_bus_node_is_compatible( OF_parent(node), "marvell,armada-370-pcie"))) return (ENXIO); if (!ofw_bus_status_okay(self)) return (ENXIO); device_set_desc(self, "Marvell Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } static int mv_pcib_attach(device_t self) { struct mv_pcib_softc *sc; phandle_t node, parnode; uint32_t val, reg0; int err, bus, devfn, port_id; sc = device_get_softc(self); sc->sc_dev = self; node = ofw_bus_get_node(self); parnode = OF_parent(node); if (OF_getencprop(node, "marvell,pcie-port", &(port_id), sizeof(port_id)) <= 0) { /* If port ID does not exist in the FDT set value to 0 */ if (!OF_hasprop(node, "marvell,pcie-port")) port_id = 0; else return(ENXIO); } sc->ap_segment = port_id; if (ofw_bus_node_is_compatible(node, "mrvl,pcie")) { sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET(port_id); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR(port_id); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR(port_id); sc->sc_skip_enable_procedure = 1; } else if (ofw_bus_node_is_compatible(parnode, "marvell,armada-370-pcie")) { sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET_ARMADA38X(port_id); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR_ARMADA38X(port_id); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR_ARMADA38X(port_id); sc->sc_enable_find_root_slot = 1; } else if (ofw_bus_node_is_compatible(node, "mrvl,pci")) { sc->sc_type = MV_TYPE_PCI; sc->sc_win_target = MV_WIN_PCI_TARGET; sc->sc_mem_win_attr = MV_WIN_PCI_MEM_ATTR; sc->sc_io_win_attr = MV_WIN_PCI_IO_ATTR; } else return 
(ENXIO); /* * Retrieve our mem-mapped registers range. */ sc->sc_rid = 0; sc->sc_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(self, "could not map memory\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res); sc->sc_bsh = rman_get_bushandle(sc->sc_res); val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_CONTROL); sc->sc_mode = (val & PCIE_CONTROL_ROOT_CMPLX ? MV_MODE_ROOT : MV_MODE_ENDPOINT); /* * Get PCI interrupt info. */ if (sc->sc_mode == MV_MODE_ROOT) ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(pcell_t)); /* * Configure decode windows for PCI(E) access. */ if (mv_pcib_decode_win(node, sc) != 0) return (ENXIO); mv_pcib_hw_cfginit(); /* * Enable PCIE device. */ mv_pcib_enable(sc, port_id); /* * Memory management. */ err = mv_pcib_mem_init(sc); if (err) return (err); /* * Preliminary bus enumeration to find first linked devices and set * appropriate bus number from which should start the actual enumeration */ for (bus = 0; bus < PCI_BUSMAX; bus++) { for (devfn = 0; devfn < mv_pcib_maxslots(self); devfn++) { reg0 = mv_pcib_read_config(self, bus, devfn, devfn & 0x7, 0x0, 4); if (reg0 == (~0U)) continue; /* no device */ else { sc->sc_busnr = bus; /* update bus number */ break; } } } if (sc->sc_mode == MV_MODE_ROOT) { err = mv_pcib_init(sc, sc->sc_busnr, mv_pcib_maxslots(sc->sc_dev)); if (err) goto error; device_add_child(self, "pci", -1); } else { sc->sc_devnr = 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS, 1 << PCIE_STATUS_DEV_OFFS); device_add_child(self, "pci_ep", -1); } mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF); return (bus_generic_attach(self)); error: /* XXX SYS_RES_ should be released here */ rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static void mv_pcib_enable(struct mv_pcib_softc *sc, uint32_t unit) { uint32_t val; int timeout; if (sc->sc_skip_enable_procedure) goto pcib_enable_root_mode; /* * Check if PCIE device is enabled. */ if ((sc->sc_skip_enable_procedure == 0) && (read_cpu_ctrl(CPU_CONTROL) & CPU_CONTROL_PCIE_DISABLE(unit))) { write_cpu_ctrl(CPU_CONTROL, read_cpu_ctrl(CPU_CONTROL) & ~(CPU_CONTROL_PCIE_DISABLE(unit))); timeout = PCIE_LINK_TIMEOUT; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); while (((val & PCIE_STATUS_LINK_DOWN) == 1) && (timeout > 0)) { DELAY(1000); timeout -= 1000; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); } } pcib_enable_root_mode: if (sc->sc_mode == MV_MODE_ROOT) { /* * Enable PCI bridge. */ val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND); val |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND, val); } } static int mv_pcib_mem_init(struct mv_pcib_softc *sc) { int err; /* * Memory management. 
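 * Create the rmans from which child PCI memory and I/O window
 * allocations are carved out.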
*/ sc->sc_mem_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_mem_rman); if (err) return (err); sc->sc_io_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_io_rman); if (err) { rman_fini(&sc->sc_mem_rman); return (err); } err = rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base, sc->sc_mem_base + sc->sc_mem_size - 1); if (err) goto error; err = rman_manage_region(&sc->sc_io_rman, sc->sc_io_base, sc->sc_io_base + sc->sc_io_size - 1); if (err) goto error; return (0); error: rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static inline uint32_t pcib_bit_get(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; return (map[n] & (1 << bit)); } static inline void pcib_bit_set(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; map[n] |= (1 << bit); } static inline uint32_t pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) if (pcib_bit_get(map, i)) return (0); return (1); } static inline void pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) pcib_bit_set(map, i); } /* * The idea of this allocator is taken from ARM No-Cache memory * management code (sys/arm/arm/vm_machdep.c). */ static bus_addr_t pcib_alloc(struct mv_pcib_softc *sc, uint32_t smask) { uint32_t bits, bits_limit, i, *map, min_alloc, size; bus_addr_t addr = 0; bus_addr_t base; if (smask & 1) { base = sc->sc_io_base; min_alloc = PCI_MIN_IO_ALLOC; bits_limit = sc->sc_io_size / min_alloc; map = sc->sc_io_map; smask &= ~0x3; } else { base = sc->sc_mem_base; min_alloc = PCI_MIN_MEM_ALLOC; bits_limit = sc->sc_mem_size / min_alloc; map = sc->sc_mem_map; smask &= ~0xF; } size = ~smask + 1; bits = size / min_alloc; for (i = 0; i + bits <= bits_limit; i += bits) if (pcib_map_check(map, i, bits)) { pcib_map_set(map, i, bits); addr = base + (i * min_alloc); return (addr); } return (addr); } static int mv_pcib_init_bar(struct mv_pcib_softc *sc, int bus, int slot, int func, int barno) { uint32_t addr, bar; int reg, width; reg = PCIR_BAR(barno); /* * Need to init the BAR register with 0xffffffff before correct * value can be read. */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, ~0, 4); bar = mv_pcib_read_config(sc->sc_dev, bus, slot, func, reg, 4); if (bar == 0) return (1); /* Calculate BAR size: 64 or 32 bit (in 32-bit units) */ width = ((bar & 7) == 4) ? 
2 : 1; addr = pcib_alloc(sc, bar); if (!addr) return (-1); if (bootverbose) printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n", bus, slot, func, reg, bar, addr); mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, addr, 4); if (width == 2) mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg + 4, 0, 4); return (width); } static void mv_pcib_init_bridge(struct mv_pcib_softc *sc, int bus, int slot, int func) { bus_addr_t io_base, mem_base; uint32_t io_limit, mem_limit; int secbus; io_base = sc->sc_io_base; io_limit = io_base + sc->sc_io_size - 1; mem_base = sc->sc_mem_base; mem_limit = mem_base + sc->sc_mem_size - 1; /* Configure I/O decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEL_1, io_base >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEH_1, io_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITL_1, io_limit >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITH_1, io_limit >> 16, 2); /* Configure memory decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMBASE_1, mem_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMLIMIT_1, mem_limit >> 16, 2); /* Disable memory prefetch decode */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEL_1, 0x10, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEH_1, 0x0, 4); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITL_1, 0xF, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITH_1, 0x0, 4); secbus = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SECBUS_1, 1); /* Configure buses behind the bridge */ mv_pcib_init(sc, secbus, PCI_SLOTMAX); } static int mv_pcib_init(struct mv_pcib_softc *sc, int bus, int maxslot) { int slot, func, maxfunc, error; uint8_t hdrtype, command, class, subclass; for (slot = 0; slot <= maxslot; slot++) { maxfunc = 0; for (func = 0; func <= maxfunc; func++) { hdrtype = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (func == 0 && (hdrtype & PCIM_MFDEV)) maxfunc = PCI_FUNCMAX; command = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); error = mv_pcib_init_all_bars(sc, bus, slot, func, hdrtype); if (error) return (error); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); /* Handle PCI-PCI bridges */ class = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_CLASS, 1); subclass = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SUBCLASS, 1); if (class != PCIC_BRIDGE || subclass != PCIS_BRIDGE_PCI) continue; mv_pcib_init_bridge(sc, bus, slot, func); } } /* Enable all ABCD interrupts */ pcib_write_irq_mask(sc, (0xF << 24)); return (0); } static int mv_pcib_init_all_bars(struct mv_pcib_softc *sc, int bus, int slot, int func, int hdrtype) { int maxbar, bar, i; maxbar = (hdrtype & PCIM_HDRTYPE) ? 
0 : 6; bar = 0; /* Program the base address registers */ while (bar < maxbar) { i = mv_pcib_init_bar(sc, bus, slot, func, bar); bar += i; if (i < 0) { device_printf(sc->sc_dev, "PCI IO/Memory space exhausted\n"); return (ENOMEM); } } return (0); } static struct rman * mv_pcib_get_rman(device_t dev, int type, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: return (&sc->sc_io_rman); case SYS_RES_MEMORY: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end, count, flags)); #endif default: return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags)); } if (RMAN_IS_DEFAULT_RANGE(start, end)) { start = sc->sc_mem_base; end = sc->sc_mem_base + sc->sc_mem_size - 1; count = sc->sc_mem_size; } if ((start < sc->sc_mem_base) || (start + count - 1 != end) || (end > sc->sc_mem_base + sc->sc_mem_size - 1)) return (NULL); return (bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int mv_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(dev, child, r, start, end)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->ap_segment, child, r, start, end)); #endif default: return (bus_generic_adjust_resource(dev, child, r, start, end)); } } static int mv_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(dev, child, type, rid, res)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_release_bus(sc->ap_segment, child, rid, res)); #endif default: return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, res)); } } static int -mv_pcib_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +mv_pcib_activate_resource(device_t dev, device_t child, struct resource *r) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_activate_resource(dev, child, type, - rid, r)); + return (bus_generic_rman_activate_resource(dev, child, r)); #ifdef PCI_RES_BUS case PCI_RES_BUS: - return (pci_domain_activate_bus(sc->ap_segment, child, rid, r)); + return (pci_domain_activate_bus(sc->ap_segment, child, r)); #endif default: - return (bus_generic_activate_resource(dev, child, type, rid, - r)); + return (bus_generic_activate_resource(dev, child, r)); } } static int -mv_pcib_deactivate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +mv_pcib_deactivate_resource(device_t dev, device_t child, struct resource *r) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(r)) { case 
SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_deactivate_resource(dev, child, type, - rid, r)); + return (bus_generic_rman_deactivate_resource(dev, child, r)); #ifdef PCI_RES_BUS case PCI_RES_BUS: - return (pci_domain_deactivate_bus(sc->ap_segment, child, rid, - r)); + return (pci_domain_deactivate_bus(sc->ap_segment, child, r)); #endif default: - return (bus_generic_deactivate_resource(dev, child, type, rid, - r)); + return (bus_generic_deactivate_resource(dev, child, r)); } } static int mv_pcib_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_bustag = fdtbus_bs_tag; map->r_bushandle = start; map->r_size = length; return (0); } static int mv_pcib_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (0); default: return (EINVAL); } } static int mv_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->sc_busnr; return (0); case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); } return (ENOENT); } static int mv_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busnr = value; return (0); } return (ENOENT); } static inline void pcib_write_irq_mask(struct mv_pcib_softc *sc, uint32_t mask) { if (sc->sc_type != MV_TYPE_PCIE) return; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_IRQ_MASK, mask); } static void mv_pcib_hw_cfginit(void) { static int opened = 0; if (opened) return; mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); opened = 1; } static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t addr, data, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); data = ~0; switch (bytes) { case 1: data = bus_space_read_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3)); break; case 2: data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2))); break; case 4: data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh, cd)); break; } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { uint32_t addr, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? 
PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); switch (bytes) { case 1: bus_space_write_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3), data); break; case 2: bus_space_write_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2), htole16(data)); break; case 4: bus_space_write_4(sc->sc_bst, sc->sc_bsh, cd, htole32(data)); break; } mtx_unlock_spin(&pcicfg_mtx); } static int mv_pcib_maxslots(device_t dev) { struct mv_pcib_softc *sc = device_get_softc(dev); return ((sc->sc_type != MV_TYPE_PCI) ? 1 : PCI_SLOTMAX); } static int mv_pcib_root_slot(device_t dev, u_int bus, u_int slot, u_int func) { struct mv_pcib_softc *sc = device_get_softc(dev); uint32_t vendor, device; /* On platforms other than Armada38x, root link is always at slot 0 */ if (!sc->sc_enable_find_root_slot) return (slot == 0); vendor = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_VENDOR, PCIR_VENDOR_LENGTH); device = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_DEVICE, PCIR_DEVICE_LENGTH) & MV_DEV_FAMILY_MASK; return (vendor == PCI_VENDORID_MRVL && device == MV_DEV_ARMADA38X); } static uint32_t mv_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return ~0 if link is inactive or trying to read from Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return (~0U); return (mv_pcib_hw_cfgread(sc, bus, slot, func, reg, bytes)); } static void mv_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return if link is inactive or trying to write to Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return; mv_pcib_hw_cfgwrite(sc, bus, slot, func, reg, val, bytes); } static int mv_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct mv_pcib_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[4]; int icells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(®, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); icells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (icells > 0) return (ofw_bus_map_intr(dev, iparent, icells, mintr)); /* Maybe it's a real interrupt, not an intpin */ if (pin > 4) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int mv_pcib_decode_win(phandle_t node, struct mv_pcib_softc *sc) { struct mv_pci_range io_space, mem_space; device_t dev; int error; dev = sc->sc_dev; if ((error = mv_pci_ranges(node, &io_space, &mem_space)) != 0) { device_printf(dev, "could not retrieve 'ranges' data\n"); return (error); } /* Configure CPU decoding windows */ error = decode_win_cpu_set(sc->sc_win_target, sc->sc_io_win_attr, io_space.base_parent, io_space.len, ~0); if (error < 0) { device_printf(dev, "could not set up CPU decode " "window for PCI IO\n"); return (ENXIO); } error = 
decode_win_cpu_set(sc->sc_win_target, sc->sc_mem_win_attr, mem_space.base_parent, mem_space.len, mem_space.base_parent); if (error < 0) { device_printf(dev, "could not set up CPU decode " "windows for PCI MEM\n"); return (ENXIO); } sc->sc_io_base = io_space.base_parent; sc->sc_io_size = io_space.len; sc->sc_mem_base = mem_space.base_parent; sc->sc_mem_size = mem_space.len; return (0); } static int mv_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct mv_pcib_softc *sc; sc = device_get_softc(dev); if (!sc->sc_msi_supported) return (ENOTSUP); irq = irq - MSI_IRQ; /* validate parameters */ if (isclr(&sc->sc_msi_bitmap, irq)) { device_printf(dev, "invalid MSI 0x%x\n", irq); return (EINVAL); } mv_msi_data(irq, addr, data); debugf("%s: irq: %d addr: %jx data: %x\n", __func__, irq, *addr, *data); return (0); } static int mv_pcib_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused, int *irqs) { struct mv_pcib_softc *sc; u_int start = 0, i; sc = device_get_softc(dev); if (!sc->sc_msi_supported) return (ENOTSUP); if (powerof2(count) == 0 || count > MSI_IRQ_NUM) return (EINVAL); mtx_lock(&sc->sc_msi_mtx); for (start = 0; (start + count) < MSI_IRQ_NUM; start++) { for (i = start; i < start + count; i++) { if (isset(&sc->sc_msi_bitmap, i)) break; } if (i == start + count) break; } if ((start + count) == MSI_IRQ_NUM) { mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } for (i = start; i < start + count; i++) { setbit(&sc->sc_msi_bitmap, i); *irqs++ = MSI_IRQ + i; } debugf("%s: start: %x count: %x\n", __func__, start, count); mtx_unlock(&sc->sc_msi_mtx); return (0); } static int mv_pcib_release_msi(device_t dev, device_t child, int count, int *irqs) { struct mv_pcib_softc *sc; u_int i; sc = device_get_softc(dev); if(!sc->sc_msi_supported) return (ENOTSUP); mtx_lock(&sc->sc_msi_mtx); for (i = 0; i < count; i++) clrbit(&sc->sc_msi_bitmap, irqs[i] - MSI_IRQ); mtx_unlock(&sc->sc_msi_mtx); return (0); } diff --git a/sys/arm64/arm64/nexus.c b/sys/arm64/arm64/nexus.c index ed500ba5ea6d..3e9399384855 100644 --- a/sys/arm64/arm64/nexus.c +++ b/sys/arm64/arm64/nexus.c @@ -1,621 +1,617 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for Arm Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests and I/O memory address space. */ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #include "ofw_bus_if.h" #endif #ifdef DEV_ACPI #include #include #include "acpi_bus_if.h" #endif extern struct bus_space memmap_bus; static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; static int force_np; SYSCTL_INT(_kern, OID_AUTO, force_nonposted, CTLFLAG_RDTUN, &force_np, 0, "Force all devices to use non-posted device memory"); #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static struct rman irq_rman; static int nexus_attach(device_t); #ifdef FDT static device_probe_t nexus_fdt_probe; static device_attach_t nexus_fdt_attach; static bus_activate_resource_t nexus_fdt_activate_resource; #endif #ifdef DEV_ACPI static device_probe_t nexus_acpi_probe; static device_attach_t nexus_acpi_attach; #endif static bus_add_child_t nexus_add_child; static bus_print_child_t nexus_print_child; static bus_activate_resource_t nexus_activate_resource; static bus_alloc_resource_t nexus_alloc_resource; static bus_deactivate_resource_t nexus_deactivate_resource; static bus_get_resource_list_t nexus_get_reslist; static bus_get_rman_t nexus_get_rman; static bus_map_resource_t nexus_map_resource; static bus_unmap_resource_t nexus_unmap_resource; #ifdef SMP static bus_bind_intr_t nexus_bind_intr; #endif static bus_config_intr_t nexus_config_intr; static bus_describe_intr_t nexus_describe_intr; static bus_setup_intr_t nexus_setup_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_get_bus_tag_t nexus_get_bus_tag; #ifdef FDT static ofw_bus_map_intr_t nexus_ofw_map_intr; #endif static device_method_t nexus_methods[] = { /* Bus interface */ DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_get_rman, nexus_get_rman), DEVMETHOD(bus_map_resource, nexus_map_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_unmap_resource, nexus_unmap_resource), #ifdef SMP 
DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), DEVMETHOD_END }; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_attach mem_rman"); irq_rman.rm_start = 0; irq_rman.rm_end = ~0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0)) panic("nexus_attach irq_rman"); bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } static struct rman * nexus_get_rman(device_t bus, int type, u_int flags) { switch (type) { case SYS_RES_IRQ: return (&irq_rman); case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&mem_rman); default: return (NULL); } } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource_list_entry *rle; /* * If this is an allocation of the "default" range for a given * RID, and we know what the resources for this device are * (ie. they aren't maintained by a child bus), then work out * the start/end values. */ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return (NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* * On arm64 (due to INTRNG), ACPI interrupt configuration is * done in nexus_acpi_map_intr(). */ return (0); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error; if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* We depend here on rman_activate_resource() being idempotent. 
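 * (The IRQ resource may already be active at this point, e.g. when it
 * was allocated with RF_ACTIVE.)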
*/ error = rman_activate_resource(res); if (error) return (error); error = intr_setup_irq(child, res, filt, intr, arg, flags, cookiep); return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_teardown_irq(child, r, ih)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe_irq(child, irq, cookie, descr)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind_irq(child, irq, cpu)); } #endif static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { return (&memmap_bus); } static int -nexus_activate_resource_flags(device_t bus, device_t child, int type, int rid, - struct resource *r, int flags) +nexus_activate_resource_flags(device_t bus, device_t child, struct resource *r, + int flags) { struct resource_map_request args; struct resource_map map; int err, use_np; if ((err = rman_activate_resource(r)) != 0) return (err); /* * If this is a memory resource, map it into the kernel. */ - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: if ((rman_get_flags(r) & RF_UNMAPPED) == 0) { resource_init_map_request(&args); use_np = (flags & BUS_SPACE_MAP_NONPOSTED) != 0 || force_np; if (!use_np) resource_int_value(device_get_name(child), device_get_unit(child), "force_nonposted", &use_np); if (use_np) args.memattr = VM_MEMATTR_DEVICE_NP; err = nexus_map_resource(bus, child, r, &args, &map); if (err != 0) { rman_deactivate_resource(r); return (err); } rman_set_mapping(r, &map); } break; case SYS_RES_IRQ: err = intr_activate_irq(child, r); if (err != 0) { rman_deactivate_resource(r); return (err); } } return (0); } static int -nexus_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +nexus_activate_resource(device_t dev, device_t child, struct resource *r) { - return (nexus_activate_resource_flags(dev, child, type, rid, r, 0)); + return (nexus_activate_resource_flags(dev, child, r, 0)); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int -nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +nexus_deactivate_resource(device_t bus, device_t child, struct resource *r) { int error; - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_deactivate_resource(bus, child, type, - rid, r)); + return (bus_generic_rman_deactivate_resource(bus, child, r)); case SYS_RES_IRQ: error = rman_deactivate_resource(r); if (error) return (error); intr_deactivate_irq(child, r); return (0); default: return (EINVAL); } } static int nexus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if ((rman_get_flags(r) & RF_ACTIVE) == 0) return (ENXIO); /* Mappings are only supported on I/O and memory resources. 
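 * A driver that allocated its resource with RF_UNMAPPED typically
 * reaches this method through bus_map_resource(), along the lines of
 * (a hypothetical caller, shown for illustration only):
 *
 *	struct resource_map_request req;
 *	struct resource_map map;
 *
 *	resource_init_map_request(&req);
 *	req.memattr = VM_MEMATTR_DEVICE_NP;
 *	error = bus_map_resource(dev, SYS_RES_MEMORY, r, &req, &map);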
*/ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_vaddr = pmap_mapdev_attr(start, length, args.memattr); map->r_bustag = &memmap_bus; map->r_size = length; /* * The handle is the virtual address. */ map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int nexus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: pmap_unmapdev(map->r_vaddr, map->r_size); return (0); default: return (EINVAL); } } #ifdef FDT static device_method_t nexus_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_fdt_probe), DEVMETHOD(device_attach, nexus_fdt_attach), /* Bus interface */ DEVMETHOD(bus_activate_resource, nexus_fdt_activate_resource), /* OFW interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), DEVMETHOD_END, }; #define nexus_baseclasses nexus_fdt_baseclasses DEFINE_CLASS_1(nexus, nexus_fdt_driver, nexus_fdt_methods, 1, nexus_driver); #undef nexus_baseclasses EARLY_DRIVER_MODULE(nexus_fdt, root, nexus_fdt_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_fdt_probe(device_t dev) { if (arm64_bus_method != ARM64_BUS_FDT) return (ENXIO); device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int nexus_fdt_attach(device_t dev) { nexus_add_child(dev, 10, "ofwbus", 0); return (nexus_attach(dev)); } static int -nexus_fdt_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +nexus_fdt_activate_resource(device_t bus, device_t child, struct resource *r) { phandle_t node, parent; int flags; flags = 0; - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: /* * If the fdt parent has the nonposted-mmio property we * need to use non-posted IO to access the device. When * we find this property set the BUS_SPACE_MAP_NONPOSTED * flag to be passed to bus_space_map. 
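 * (nonposted-mmio is a boolean property: its mere presence on the
 * parent node is what matters; no value is read.  When it is found,
 * the mapping set up in nexus_activate_resource_flags() uses
 * VM_MEMATTR_DEVICE_NP.)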
*/ node = ofw_bus_get_node(child); if (node != -1) { parent = OF_parent(node); if (parent != 0 && OF_hasprop(parent, "nonposted-mmio")) { flags |= BUS_SPACE_MAP_NONPOSTED; } } break; default: break; } - return (nexus_activate_resource_flags(bus, child, type, rid, r, flags)); + return (nexus_activate_resource_flags(bus, child, r, flags)); } static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { u_int irq; struct intr_map_data_fdt *fdt_data; size_t len; len = sizeof(*fdt_data) + icells * sizeof(pcell_t); fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data( INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO); fdt_data->iparent = iparent; fdt_data->ncells = icells; memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t)); irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data); return (irq); } #endif #ifdef DEV_ACPI static int nexus_acpi_map_intr(device_t dev, device_t child, u_int irq, int trig, int pol); static device_method_t nexus_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_acpi_probe), DEVMETHOD(device_attach, nexus_acpi_attach), /* ACPI interface */ DEVMETHOD(acpi_bus_map_intr, nexus_acpi_map_intr), DEVMETHOD_END, }; #define nexus_baseclasses nexus_acpi_baseclasses DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1, nexus_driver); #undef nexus_baseclasses EARLY_DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_acpi_probe(device_t dev) { if (arm64_bus_method != ARM64_BUS_ACPI || acpi_identify() != 0) return (ENXIO); device_quiet(dev); return (BUS_PROBE_LOW_PRIORITY); } static int nexus_acpi_attach(device_t dev) { nexus_add_child(dev, 10, "acpi", 0); return (nexus_attach(dev)); } static int nexus_acpi_map_intr(device_t dev, device_t child, u_int irq, int trig, int pol) { struct intr_map_data_acpi *acpi_data; size_t len; len = sizeof(*acpi_data); acpi_data = (struct intr_map_data_acpi *)intr_alloc_map_data( INTR_MAP_DATA_ACPI, len, M_WAITOK | M_ZERO); acpi_data->irq = irq; acpi_data->pol = pol; acpi_data->trig = trig; /* * TODO: This will only handle a single interrupt controller. * ACPI will map multiple controllers into a single virtual IRQ * space. Each controller has a System Vector Base to hold the * first irq it handles in this space. As such the correct way * to handle interrupts with ACPI is to search through the * controllers for the largest base value that is no larger than * the IRQ value. */ irq = intr_map_irq(NULL, ACPI_INTR_XREF, (struct intr_map_data *)acpi_data); return (irq); } #endif diff --git a/sys/arm64/cavium/thunder_pcie_pem.c b/sys/arm64/cavium/thunder_pcie_pem.c index b53f70696ff1..f7c3c5ee1c0a 100644 --- a/sys/arm64/cavium/thunder_pcie_pem.c +++ b/sys/arm64/cavium/thunder_pcie_pem.c @@ -1,971 +1,965 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* PCIe external MAC root complex driver (PEM) for Cavium Thunder SOC */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #define THUNDER_PEM_DEVICE_ID 0xa020 #define THUNDER_PEM_VENDOR_ID 0x177d /* ThunderX specific defines */ #define THUNDER_PEMn_REG_BASE(unit) (0x87e0c0000000UL | ((unit) << 24)) #define PCIERC_CFG002 0x08 #define PCIERC_CFG006 0x18 #define PCIERC_CFG032 0x80 #define PCIERC_CFG006_SEC_BUS(reg) (((reg) >> 8) & 0xFF) #define PEM_CFG_RD_REG_ALIGN(reg) ((reg) & ~0x3) #define PEM_CFG_RD_REG_DATA(val) (((val) >> 32) & 0xFFFFFFFF) #define PEM_CFG_RD 0x30 #define PEM_CFG_LINK_MASK 0x3 #define PEM_CFG_LINK_RDY 0x3 #define PEM_CFG_SLIX_TO_REG(slix) ((slix) << 4) #define SBNUM_OFFSET 0x8 #define SBNUM_MASK 0xFF #define PEM_ON_REG 0x420 #define PEM_CTL_STATUS 0x0 #define PEM_LINK_ENABLE (1 << 4) #define PEM_LINK_DLLA (1 << 29) #define PEM_LINK_LT (1 << 27) #define PEM_BUS_SHIFT (24) #define PEM_SLOT_SHIFT (19) #define PEM_FUNC_SHIFT (16) #define SLIX_S2M_REGX_ACC 0x874001000000UL #define SLIX_S2M_REGX_ACC_SIZE 0x1000 #define SLIX_S2M_REGX_ACC_SPACING 0x001000000000UL #define SLI_BASE 0x880000000000UL #define SLI_WINDOW_SPACING 0x004000000000UL #define SLI_PCI_OFFSET 0x001000000000UL #define SLI_NODE_SHIFT (44) #define SLI_NODE_MASK (3) #define SLI_GROUP_SHIFT (40) #define SLI_ID_SHIFT (24) #define SLI_ID_MASK (7) #define SLI_PEMS_PER_GROUP (3) #define SLI_GROUPS_PER_NODE (2) #define SLI_PEMS_PER_NODE (SLI_PEMS_PER_GROUP * SLI_GROUPS_PER_NODE) #define SLI_ACC_REG_CNT (256) /* * Each PEM device creates its own bus with its * own address translation, so we can adjust bus addresses * as we want.
To support 32-bit cards let's assume * PCI window assignment looks as following: * * 0x00000000 - 0x000FFFFF IO * 0x00100000 - 0xFFFFFFFF Memory */ #define PCI_IO_BASE 0x00000000UL #define PCI_IO_SIZE 0x00100000UL #define PCI_MEMORY_BASE PCI_IO_SIZE #define PCI_MEMORY_SIZE 0xFFF00000UL #define RID_PEM_SPACE 1 -static int thunder_pem_activate_resource(device_t, device_t, int, int, - struct resource *); +static int thunder_pem_activate_resource(device_t, device_t, struct resource *); static int thunder_pem_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static struct resource * thunder_pem_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int thunder_pem_alloc_msi(device_t, device_t, int, int, int *); static int thunder_pem_release_msi(device_t, device_t, int, int *); static int thunder_pem_alloc_msix(device_t, device_t, int *); static int thunder_pem_release_msix(device_t, device_t, int); static int thunder_pem_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int thunder_pem_get_id(device_t, device_t, enum pci_id_type, uintptr_t *); static int thunder_pem_attach(device_t); -static int thunder_pem_deactivate_resource(device_t, device_t, int, int, +static int thunder_pem_deactivate_resource(device_t, device_t, struct resource *); static int thunder_pem_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int thunder_pem_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static bus_dma_tag_t thunder_pem_get_dma_tag(device_t, device_t); static int thunder_pem_detach(device_t); static uint64_t thunder_pem_config_reg_read(struct thunder_pem_softc *, int); static int thunder_pem_link_init(struct thunder_pem_softc *); static int thunder_pem_maxslots(device_t); static int thunder_pem_probe(device_t); static uint32_t thunder_pem_read_config(device_t, u_int, u_int, u_int, u_int, int); static int thunder_pem_read_ivar(device_t, device_t, int, uintptr_t *); static void thunder_pem_release_all(device_t); static int thunder_pem_release_resource(device_t, device_t, int, int, struct resource *); static struct rman * thunder_pem_get_rman(device_t, int, u_int); static void thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *, int, int); static void thunder_pem_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int thunder_pem_write_ivar(device_t, device_t, int, uintptr_t); /* Global handlers for SLI interface */ static bus_space_handle_t sli0_s2m_regx_base = 0; static bus_space_handle_t sli1_s2m_regx_base = 0; static device_method_t thunder_pem_methods[] = { /* Device interface */ DEVMETHOD(device_probe, thunder_pem_probe), DEVMETHOD(device_attach, thunder_pem_attach), DEVMETHOD(device_detach, thunder_pem_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, thunder_pem_read_ivar), DEVMETHOD(bus_write_ivar, thunder_pem_write_ivar), DEVMETHOD(bus_get_rman, thunder_pem_get_rman), DEVMETHOD(bus_alloc_resource, thunder_pem_alloc_resource), DEVMETHOD(bus_release_resource, thunder_pem_release_resource), DEVMETHOD(bus_adjust_resource, thunder_pem_adjust_resource), DEVMETHOD(bus_activate_resource, thunder_pem_activate_resource), DEVMETHOD(bus_deactivate_resource, thunder_pem_deactivate_resource), DEVMETHOD(bus_map_resource, thunder_pem_map_resource), DEVMETHOD(bus_unmap_resource, thunder_pem_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, 
bus_generic_teardown_intr), DEVMETHOD(bus_get_dma_tag, thunder_pem_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, thunder_pem_maxslots), DEVMETHOD(pcib_read_config, thunder_pem_read_config), DEVMETHOD(pcib_write_config, thunder_pem_write_config), DEVMETHOD(pcib_alloc_msix, thunder_pem_alloc_msix), DEVMETHOD(pcib_release_msix, thunder_pem_release_msix), DEVMETHOD(pcib_alloc_msi, thunder_pem_alloc_msi), DEVMETHOD(pcib_release_msi, thunder_pem_release_msi), DEVMETHOD(pcib_map_msi, thunder_pem_map_msi), DEVMETHOD(pcib_get_id, thunder_pem_get_id), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, thunder_pem_driver, thunder_pem_methods, sizeof(struct thunder_pem_softc)); extern struct bus_space memmap_bus; DRIVER_MODULE(thunder_pem, pci, thunder_pem_driver, 0, 0); MODULE_DEPEND(thunder_pem, pci, 1, 1, 1); static int thunder_pem_maxslots(device_t dev) { #if 0 /* max slots per bus acc. to standard */ return (PCI_SLOTMAX); #else /* * ARM64TODO Workaround - otherwise an em(4) interface appears to be * present on every PCI function on the bus to which it is connected */ return (0); #endif } static int thunder_pem_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct thunder_pem_softc *sc; int secondary_bus = 0; sc = device_get_softc(dev); if (index == PCIB_IVAR_BUS) { secondary_bus = thunder_pem_config_reg_read(sc, PCIERC_CFG006); *result = PCIERC_CFG006_SEC_BUS(secondary_bus); return (0); } if (index == PCIB_IVAR_DOMAIN) { *result = sc->id; return (0); } return (ENOENT); } static int thunder_pem_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } static int -thunder_pem_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +thunder_pem_activate_resource(device_t dev, device_t child, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(r)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_activate_bus(sc->id, child, rid, r)); + return (pci_domain_activate_bus(sc->id, child, r)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_activate_resource(dev, child, type, - rid, r)); + return (bus_generic_rman_activate_resource(dev, child, r)); default: - return (bus_generic_activate_resource(dev, child, type, rid, - r)); + return (bus_generic_activate_resource(dev, child, r)); } } static int -thunder_pem_deactivate_resource(device_t dev, device_t child, int type, int rid, +thunder_pem_deactivate_resource(device_t dev, device_t child, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(r)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_deactivate_bus(sc->id, child, rid, r)); + return (pci_domain_deactivate_bus(sc->id, child, r)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_deactivate_resource(dev, child, type, - rid, r)); + return (bus_generic_rman_deactivate_resource(dev, child, r)); default: - return (bus_generic_deactivate_resource(dev, child, type, rid, - r)); + return (bus_generic_deactivate_resource(dev, child, r)); } } static int thunder_pem_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct thunder_pem_softc *sc; 
rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sc = device_get_softc(dev); start = range_addr_pci_to_phys(sc->ranges, start); error = bus_space_map(&memmap_bus, start, length, 0, &map->r_bushandle); if (error) return (error); map->r_bustag = &memmap_bus; map->r_vaddr = (void *)map->r_bushandle; map->r_size = length; return (0); } static int thunder_pem_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: bus_space_unmap(map->r_bustag, map->r_bushandle, map->r_size); return (0); default: return (EINVAL); } } static int thunder_pem_adjust_resource(device_t dev, device_t child, struct resource *res, rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->id, child, res, start, end)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_adjust_resource(dev, child, res, start, end)); default: return (bus_generic_adjust_resource(dev, child, res, start, end)); } } static bus_dma_tag_t thunder_pem_get_dma_tag(device_t dev, device_t child) { struct thunder_pem_softc *sc; sc = device_get_softc(dev); return (sc->dmat); } static int thunder_pem_alloc_msi(device_t pci, device_t child, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pci); return (PCIB_ALLOC_MSI(device_get_parent(bus), child, count, maxcount, irqs)); } static int thunder_pem_release_msi(device_t pci, device_t child, int count, int *irqs) { device_t bus; bus = device_get_parent(pci); return (PCIB_RELEASE_MSI(device_get_parent(bus), child, count, irqs)); } static int thunder_pem_alloc_msix(device_t pci, device_t child, int *irq) { device_t bus; bus = device_get_parent(pci); return (PCIB_ALLOC_MSIX(device_get_parent(bus), child, irq)); } static int thunder_pem_release_msix(device_t pci, device_t child, int irq) { device_t bus; bus = device_get_parent(pci); return (PCIB_RELEASE_MSIX(device_get_parent(bus), child, irq)); } static int thunder_pem_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, uint32_t *data) { device_t bus; bus = device_get_parent(pci); return (PCIB_MAP_MSI(device_get_parent(bus), child, irq, addr, data)); } static int thunder_pem_get_id(device_t pci, device_t child, enum pci_id_type type, uintptr_t *id) { int bsf; int pem; if (type != PCI_ID_MSI) return (pcib_get_id(pci, child, type, id)); bsf = pci_get_rid(child); /* PEM (PCIe MAC/root complex) number is equal to domain */ pem = pci_get_domain(child); /* * Set appropriate device ID (passed by the HW along with * the transaction to memory) for different root complex * numbers using hard-coded domain portion for each group. 
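 * For example (illustrative arithmetic based on the table below):
 * PEM 4 falls in the 3 <= pem < 6 group, so its MSI device ID is
 * (0x3 << PCI_RID_DOMAIN_SHIFT) | bsf, with bsf being the child's
 * PCI RID.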
*/ if (pem < 3) *id = (0x1 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 6) *id = (0x3 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 9) *id = (0x9 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 12) *id = (0xB << PCI_RID_DOMAIN_SHIFT) | bsf; else return (ENXIO); return (0); } static int thunder_pem_identify(device_t dev) { struct thunder_pem_softc *sc; rman_res_t start; sc = device_get_softc(dev); start = rman_get_start(sc->reg); /* Calculate PEM designations from its address */ sc->node = (start >> SLI_NODE_SHIFT) & SLI_NODE_MASK; sc->id = ((start >> SLI_ID_SHIFT) & SLI_ID_MASK) + (SLI_PEMS_PER_NODE * sc->node); sc->sli = sc->id % SLI_PEMS_PER_GROUP; sc->sli_group = (sc->id / SLI_PEMS_PER_GROUP) % SLI_GROUPS_PER_NODE; sc->sli_window_base = SLI_BASE | (((uint64_t)sc->node) << SLI_NODE_SHIFT) | ((uint64_t)sc->sli_group << SLI_GROUP_SHIFT); sc->sli_window_base += SLI_WINDOW_SPACING * sc->sli; return (0); } static void thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *sc, int sli_group, int slix) { uint64_t regval; bus_space_handle_t handle = 0; KASSERT(slix >= 0 && slix <= SLI_ACC_REG_CNT, ("Invalid SLI index")); if (sli_group == 0) handle = sli0_s2m_regx_base; else if (sli_group == 1) handle = sli1_s2m_regx_base; else device_printf(sc->dev, "SLI group is not correct\n"); if (handle) { /* Clear lower 32-bits of the SLIx register */ regval = bus_space_read_8(sc->reg_bst, handle, PEM_CFG_SLIX_TO_REG(slix)); regval &= ~(0xFFFFFFFFUL); bus_space_write_8(sc->reg_bst, handle, PEM_CFG_SLIX_TO_REG(slix), regval); } } static int thunder_pem_link_init(struct thunder_pem_softc *sc) { uint64_t regval; /* check whether PEM is safe to access. */ regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_ON_REG); if ((regval & PEM_CFG_LINK_MASK) != PEM_CFG_LINK_RDY) { device_printf(sc->dev, "PEM%d is not ON\n", sc->id); return (ENXIO); } regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS); regval |= PEM_LINK_ENABLE; bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS, regval); /* Wait 1ms as per Cavium specification */ DELAY(1000); regval = thunder_pem_config_reg_read(sc, PCIERC_CFG032); if (((regval & PEM_LINK_DLLA) == 0) || ((regval & PEM_LINK_LT) != 0)) { device_printf(sc->dev, "PCIe RC: Port %d Link Timeout\n", sc->id); return (ENXIO); } return (0); } static int thunder_pem_init(struct thunder_pem_softc *sc) { int i, retval = 0; retval = thunder_pem_link_init(sc); if (retval) { device_printf(sc->dev, "%s failed\n", __func__); return retval; } /* To support 32-bit PCIe devices, set S2M_REGx_ACC[BA]=0x0 */ for (i = 0; i < SLI_ACC_REG_CNT; i++) { thunder_pem_slix_s2m_regx_acc_modify(sc, sc->sli_group, i); } return (retval); } static uint64_t thunder_pem_config_reg_read(struct thunder_pem_softc *sc, int reg) { uint64_t data; /* Write to ADDR register */ bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, PEM_CFG_RD_REG_ALIGN(reg)); bus_space_barrier(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, 8, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Read from DATA register */ data = PEM_CFG_RD_REG_DATA(bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD)); return (data); } static uint32_t thunder_pem_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint64_t offset; uint32_t data; struct thunder_pem_softc *sc; bus_space_tag_t t; bus_space_handle_t h; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return (~0U); sc = device_get_softc(dev); /* Calculate offset */ offset = (bus << PEM_BUS_SHIFT) 
| (slot << PEM_SLOT_SHIFT) | (func << PEM_FUNC_SHIFT); t = sc->reg_bst; h = sc->pem_sli_base; bus_space_map(sc->reg_bst, sc->sli_window_base + offset, PCIE_REGMAX, 0, &h); switch (bytes) { case 1: data = bus_space_read_1(t, h, reg); break; case 2: data = le16toh(bus_space_read_2(t, h, reg)); break; case 4: data = le32toh(bus_space_read_4(t, h, reg)); break; default: data = ~0U; break; } bus_space_unmap(sc->reg_bst, h, PCIE_REGMAX); return (data); } static void thunder_pem_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { uint64_t offset; struct thunder_pem_softc *sc; bus_space_tag_t t; bus_space_handle_t h; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return; sc = device_get_softc(dev); /* Calculate offset */ offset = (bus << PEM_BUS_SHIFT) | (slot << PEM_SLOT_SHIFT) | (func << PEM_FUNC_SHIFT); t = sc->reg_bst; h = sc->pem_sli_base; bus_space_map(sc->reg_bst, sc->sli_window_base + offset, PCIE_REGMAX, 0, &h); switch (bytes) { case 1: bus_space_write_1(t, h, reg, val); break; case 2: bus_space_write_2(t, h, reg, htole16(val)); break; case 4: bus_space_write_4(t, h, reg, htole32(val)); break; default: break; } bus_space_unmap(sc->reg_bst, h, PCIE_REGMAX); } static struct resource * thunder_pem_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct thunder_pem_softc *sc = device_get_softc(dev); struct resource *res; device_t parent_dev; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->id, child, rid, start, end, count, flags)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: /* Find parent device. On ThunderX we know an exact path. */ parent_dev = device_get_parent(device_get_parent(dev)); return (BUS_ALLOC_RESOURCE(parent_dev, dev, type, rid, start, end, count, flags)); } if (!RMAN_IS_DEFAULT_RANGE(start, end)) { /* * We might get PHYS addresses here inherited from EFI. * Convert to PCI if necessary. */ if (range_addr_is_phys(sc->ranges, start, count)) { start = range_addr_phys_to_pci(sc->ranges, start); end = start + count - 1; } } if (bootverbose) { device_printf(dev, "thunder_pem_alloc_resource: start=%#lx, end=%#lx, count=%#lx\n", start, end, count); } res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (res == NULL && bootverbose) { device_printf(dev, "%s FAIL: type=%d, rid=%d, " "start=%016lx, end=%016lx, count=%016lx, flags=%x\n", __func__, type, *rid, start, end, count, flags); } return (res); } static int thunder_pem_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { device_t parent_dev; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_release_bus(sc->id, child, rid, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_release_resource(dev, child, type, rid, res)); default: /* Find parent device. On ThunderX we know an exact path. 
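 * (That is, two levels up: the same
 * device_get_parent(device_get_parent(dev)) chain used in
 * thunder_pem_alloc_resource() above.)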
*/ parent_dev = device_get_parent(device_get_parent(dev)); return (BUS_RELEASE_RESOURCE(parent_dev, child, type, rid, res)); } } static struct rman * thunder_pem_get_rman(device_t bus, int type, u_int flags) { struct thunder_pem_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_IOPORT: return (&sc->io_rman); case SYS_RES_MEMORY: return (&sc->mem_rman); default: break; } return (NULL); } static int thunder_pem_probe(device_t dev) { uint16_t pci_vendor_id; uint16_t pci_device_id; pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); if ((pci_vendor_id == THUNDER_PEM_VENDOR_ID) && (pci_device_id == THUNDER_PEM_DEVICE_ID)) { device_set_desc_copy(dev, THUNDER_PEM_DESC); return (0); } return (ENXIO); } static int thunder_pem_attach(device_t dev) { struct resource_map_request req; struct resource_map map; devclass_t pci_class; device_t parent; struct thunder_pem_softc *sc; int error; int rid; int tuple; uint64_t base, size; struct rman *rman; sc = device_get_softc(dev); sc->dev = dev; /* Allocate memory for resource */ pci_class = devclass_find("pci"); parent = device_get_parent(dev); if (device_get_devclass(parent) == pci_class) rid = PCIR_BAR(0); else rid = RID_PEM_SPACE; sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_UNMAPPED); if (sc->reg == NULL) { device_printf(dev, "Failed to allocate resource\n"); return (ENXIO); } resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE_NP; error = bus_map_resource(dev, SYS_RES_MEMORY, sc->reg, &req, &map); if (error != 0) { device_printf(dev, "could not map memory.\n"); return (error); } rman_set_mapping(sc->reg, &map); sc->reg_bst = rman_get_bustag(sc->reg); sc->reg_bsh = rman_get_bushandle(sc->reg); /* Create the parent DMA tag to pass down the coherent flag */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ BUS_DMA_COHERENT, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (error != 0) return (error); /* Map SLI, do it only once */ if (!sli0_s2m_regx_base) { bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC, SLIX_S2M_REGX_ACC_SIZE, 0, &sli0_s2m_regx_base); } if (!sli1_s2m_regx_base) { bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC + SLIX_S2M_REGX_ACC_SPACING, SLIX_S2M_REGX_ACC_SIZE, 0, &sli1_s2m_regx_base); } if ((sli0_s2m_regx_base == 0) || (sli1_s2m_regx_base == 0)) { device_printf(dev, "bus_space_map failed to map slix_s2m_regx_base\n"); goto fail; } /* Identify PEM */ if (thunder_pem_identify(dev) != 0) goto fail; /* Initialize rman and allocate regions */ sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "PEM PCIe Memory"; error = rman_init(&sc->mem_rman); if (error != 0) { device_printf(dev, "memory rman_init() failed. error = %d\n", error); goto fail; } sc->io_rman.rm_type = RMAN_ARRAY; sc->io_rman.rm_descr = "PEM PCIe IO"; error = rman_init(&sc->io_rman); if (error != 0) { device_printf(dev, "IO rman_init() failed. error = %d\n", error); goto fail_mem; } /* * We ignore the values that may have been provided in FDT * and configure ranges according to the below formula * for all types of devices. This is because some DTBs provided * by EFI do not have proper ranges property or don't have them * at all. 
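 * With the fixed windows defined above, a PCI address maps to the
 * CPU address sli_window_base + SLI_PCI_OFFSET + pci_base; for
 * instance, the memory window begins at PCI address PCI_MEMORY_BASE
 * (0x00100000) inside this PEM's SLI window (a worked example of the
 * formula applied below).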
*/ /* Fill memory window */ sc->ranges[0].pci_base = PCI_MEMORY_BASE; sc->ranges[0].size = PCI_MEMORY_SIZE; sc->ranges[0].phys_base = sc->sli_window_base + SLI_PCI_OFFSET + sc->ranges[0].pci_base; sc->ranges[0].flags = SYS_RES_MEMORY; /* Fill IO window */ sc->ranges[1].pci_base = PCI_IO_BASE; sc->ranges[1].size = PCI_IO_SIZE; sc->ranges[1].phys_base = sc->sli_window_base + SLI_PCI_OFFSET + sc->ranges[1].pci_base; sc->ranges[1].flags = SYS_RES_IOPORT; for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { base = sc->ranges[tuple].pci_base; size = sc->ranges[tuple].size; if (size == 0) continue; /* empty range element */ rman = thunder_pem_get_rman(dev, sc->ranges[tuple].flags, 0); if (rman != NULL) error = rman_manage_region(rman, base, base + size - 1); else error = EINVAL; if (error) { device_printf(dev, "rman_manage_region() failed. error = %d\n", error); rman_fini(&sc->mem_rman); return (error); } if (bootverbose) { device_printf(dev, "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Flags:0x%jx\n", sc->ranges[tuple].pci_base, sc->ranges[tuple].phys_base, sc->ranges[tuple].size, sc->ranges[tuple].flags); } } if (thunder_pem_init(sc)) { device_printf(dev, "Failure during PEM init\n"); goto fail_io; } device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); fail_io: rman_fini(&sc->io_rman); fail_mem: rman_fini(&sc->mem_rman); fail: bus_free_resource(dev, SYS_RES_MEMORY, sc->reg); return (ENXIO); } static void thunder_pem_release_all(device_t dev) { struct thunder_pem_softc *sc; sc = device_get_softc(dev); rman_fini(&sc->io_rman); rman_fini(&sc->mem_rman); if (sc->reg != NULL) bus_free_resource(dev, SYS_RES_MEMORY, sc->reg); } static int thunder_pem_detach(device_t dev) { thunder_pem_release_all(dev); return (0); } diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index ab34009bf654..df2017e69a86 100644 --- a/sys/dev/acpica/acpi.c +++ b/sys/dev/acpica/acpi.c @@ -1,4654 +1,4650 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; struct acpi_interface { ACPI_STRING *data; int num; }; static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; /* Global mutex for locking access to the ACPI subsystem. */ struct mtx acpi_mutex; struct callout acpi_sleep_timer; /* Bitmap of device quirks. */ int acpi_quirks; /* Supported sleep states. */ static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; static void acpi_lookup(void *arg, const char *name, device_t *dev); static int acpi_modevent(struct module *mod, int event, void *junk); static device_probe_t acpi_probe; static device_attach_t acpi_attach; static device_suspend_t acpi_suspend; static device_resume_t acpi_resume; static device_shutdown_t acpi_shutdown; static bus_add_child_t acpi_add_child; static bus_print_child_t acpi_print_child; static bus_probe_nomatch_t acpi_probe_nomatch; static bus_driver_added_t acpi_driver_added; static bus_child_deleted_t acpi_child_deleted; static bus_read_ivar_t acpi_read_ivar; static bus_write_ivar_t acpi_write_ivar; static bus_get_resource_list_t acpi_get_rlist; static bus_get_rman_t acpi_get_rman; static bus_set_resource_t acpi_set_resource; static bus_alloc_resource_t acpi_alloc_resource; static bus_adjust_resource_t acpi_adjust_resource; static bus_release_resource_t acpi_release_resource; static bus_delete_resource_t acpi_delete_resource; static bus_activate_resource_t acpi_activate_resource; static bus_deactivate_resource_t acpi_deactivate_resource; static bus_map_resource_t acpi_map_resource; static bus_unmap_resource_t acpi_unmap_resource; static bus_child_pnpinfo_t acpi_child_pnpinfo_method; static bus_child_location_t acpi_child_location_method; static bus_hint_device_unit_t acpi_hint_device_unit; static bus_get_property_t acpi_bus_get_prop; static bus_get_device_path_t acpi_get_device_path; static acpi_id_probe_t acpi_device_id_probe; static acpi_evaluate_object_t acpi_device_eval_obj; static acpi_get_property_t acpi_device_get_prop; static acpi_scan_children_t acpi_device_scan_children; static isa_pnp_probe_t acpi_isa_pnp_probe; static void acpi_reserve_resources(device_t dev); static int acpi_sysres_alloc(device_t dev); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *context, void **retval); static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad); static void acpi_platform_osc(device_t dev); static void acpi_probe_children(device_t bus); static void acpi_probe_order(ACPI_HANDLE handle, int *order); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_sleep_enable(void *arg); static ACPI_STATUS acpi_sleep_disable(struct 
acpi_softc *sc); static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_resync_clock(struct acpi_softc *sc); static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_prep_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sname2sstate(const char *sname); static const char *acpi_sstate2sname(int sstate); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); static void acpi_enable_pcie(void); static void acpi_reset_interfaces(device_t dev); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, acpi_shutdown), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_suspend, acpi_suspend), DEVMETHOD(device_resume, acpi_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_driver_added), DEVMETHOD(bus_child_deleted, acpi_child_deleted), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_get_resource_list, acpi_get_rlist), DEVMETHOD(bus_get_rman, acpi_get_rman), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_delete_resource, acpi_delete_resource), DEVMETHOD(bus_activate_resource, acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_deactivate_resource), DEVMETHOD(bus_map_resource, acpi_map_resource), DEVMETHOD(bus_unmap_resource, acpi_unmap_resource), DEVMETHOD(bus_child_pnpinfo, acpi_child_pnpinfo_method), DEVMETHOD(bus_child_location, acpi_child_location_method), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_domain, acpi_get_domain), DEVMETHOD(bus_get_property, acpi_bus_get_prop), DEVMETHOD(bus_get_device_path, acpi_get_device_path), /* ACPI bus */ DEVMETHOD(acpi_id_probe, acpi_device_id_probe), DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), DEVMETHOD(acpi_get_property, acpi_device_get_prop), DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), DEVMETHOD(acpi_scan_children, acpi_device_scan_children), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), DEVMETHOD_END }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_modevent, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(acpi, 1); ACPI_SERIAL_DECL(acpi, "ACPI root bus"); /* Local pools for managing system resources for ACPI child devices. 
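 * (acpi_rman_io covers the 16-bit I/O port range 0..0xffff and
 * acpi_rman_mem covers memory addresses; both are initialized in
 * acpi_attach() below.)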
*/ static struct rman acpi_rman_io, acpi_rman_mem; #define ACPI_MINIMUM_AWAKETIME 5 /* Holds the description of the acpi0 device. */ static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow overriding _OSI methods. */ static char acpi_install_interface[256]; TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, sizeof(acpi_install_interface)); static char acpi_remove_interface[256]; TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, sizeof(acpi_remove_interface)); /* Allow users to dump Debug objects without ACPI debugger. */ static int acpi_debug_objects; TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0, acpi_debug_objects_sysctl, "I", "Enable Debug objects"); /* Allow the interpreter to ignore common mistakes in BIOS. */ static int acpi_interpreter_slack = 1; TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); /* Ignore register widths set by FADT and use default widths instead. */ static int acpi_ignore_reg_width = 1; TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); /* Allow users to override quirks. */ TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); int acpi_susp_bounce; SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); /* * ACPI standard UUID for Device Specific Data Package * "Device Properties UUID for _DSD" Rev. 2.0 */ static const struct uuid acpi_dsd_uuid = { 0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, { 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 } }; /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and can be fatal to the system. * It also cannot be unloaded, since the entire system bus hierarchy hangs * off it. */ static int acpi_modevent(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: if (!cold) { printf("The ACPI driver cannot be loaded after boot.\n"); return (EPERM); } break; case MOD_UNLOAD: if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) return (EBUSY); break; default: break; } return (0); } /* * Perform early initialization. */ ACPI_STATUS acpi_Startup(void) { static int started = 0; ACPI_STATUS status; int val; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Only run the startup code once. The MADT driver also calls this. */ if (started) return_VALUE (AE_OK); started = 1; /* * Initialize the ACPICA subsystem. */ if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) { printf("ACPI: Could not initialize Subsystem: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing * if more tables exist. */ if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { printf("ACPI: Table initialisation failed: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* Set up any quirks we have for this system. 
*/ if (acpi_quirks == ACPI_Q_OK) acpi_table_quirks(&acpi_quirks); /* If the user manually set the disabled hint to 0, force-enable ACPI. */ if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) acpi_quirks &= ~ACPI_Q_BROKEN; if (acpi_quirks & ACPI_Q_BROKEN) { printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); status = AE_SUPPORT; } return_VALUE (status); } /* * Detect ACPI and perform early initialisation. */ int acpi_identify(void) { ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_HEADER *rsdt; ACPI_PHYSICAL_ADDRESS paddr; struct sbuf sb; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (!cold) return (ENXIO); /* Check that we haven't been disabled with a hint. */ if (resource_disabled("acpi", 0)) return (ENXIO); /* Check for other PM systems. */ if (power_pm_get_type() != POWER_PM_TYPE_NONE && power_pm_get_type() != POWER_PM_TYPE_ACPI) { printf("ACPI identify failed, other PM system enabled.\n"); return (ENXIO); } /* Initialize root tables. */ if (ACPI_FAILURE(acpi_Startup())) { printf("ACPI: Try disabling either ACPI or apic support.\n"); return (ENXIO); } if ((paddr = AcpiOsGetRootPointer()) == 0 || (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) return (ENXIO); if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; else paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) return (ENXIO); sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); sbuf_trim(&sb); sbuf_putc(&sb, ' '); sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); sbuf_trim(&sb); sbuf_finish(&sb); sbuf_delete(&sb); AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); return (0); } /* * Fetch some descriptive data from ACPI to put in our attach message. */ static int acpi_probe(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); device_set_desc(dev, acpi_desc); return_VALUE (BUS_PROBE_NOWILDCARD); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->acpi_dev = dev; callout_init(&sc->susp_force_to, 1); error = ENXIO; /* Initialize resource manager. */ acpi_rman_io.rm_type = RMAN_ARRAY; acpi_rman_io.rm_start = 0; acpi_rman_io.rm_end = 0xffff; acpi_rman_io.rm_descr = "ACPI I/O ports"; if (rman_init(&acpi_rman_io) != 0) panic("acpi rman_init IO ports failed"); acpi_rman_mem.rm_type = RMAN_ARRAY; acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; if (rman_init(&acpi_rman_mem) != 0) panic("acpi rman_init memory failed"); resource_list_init(&sc->sysres_rl); /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); /* * Set the globals from our tunables. This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE; #ifndef ACPI_DEBUG /* * Disable all debugging layers and levels. */ AcpiDbgLayer = 0; AcpiDbgLevel = 0; #endif /* Override OS interfaces if the user requested. 
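 * The interfaces come from the hw.acpi.install_interface and
 * hw.acpi.remove_interface tunables declared above, e.g. (a
 * hypothetical loader.conf line, shown for illustration only):
 *
 *	hw.acpi.install_interface="Windows 2015"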
*/ acpi_reset_interfaces(dev); /* Load ACPI name space. */ status = AcpiLoadTables(); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not load Namespace: %s\n", AcpiFormatException(status)); goto out; } /* Handle MCFG table if present. */ acpi_enable_pcie(); /* * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT. * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here. */ flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; /* Bring the hardware and basic handlers online. */ if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. * * XXX This happens before the sysresource devices have been probed and * attached so its resources come from nexus0. In practice, this isn't * a problem but should be addressed eventually. */ acpi_ec_ecdt_probe(dev); /* Bring device objects and regions online. */ if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Set up our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, acpi_supported_sleep_state_sysctl, "A", "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "Lid ACPI sleep state.
Set to S3 if you want to suspend your laptop when you close the lid."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, "sleep delay in seconds"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "disable_on_reboot", CTLFLAG_RW, &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "handle_reboot", CTLFLAG_RW, &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); /* * Default to 1 second before sleeping to give some machines time to * stabilize. */ sc->acpi_sleep_delay = 1; if (bootverbose) sc->acpi_verbose = 1; if ((env = kern_getenv("hw.acpi.verbose")) != NULL) { if (strcmp(env, "0") != 0) sc->acpi_verbose = 1; freeenv(env); } /* Only enable reboot by default if the FADT says it is available. */ if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) sc->acpi_handle_reboot = 1; #if !ACPI_REDUCED_HARDWARE /* Only enable S4BIOS by default if the FACS says it is available. */ if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) sc->acpi_s4bios = 1; #endif /* Probe all supported sleep states. */ acpi_sleep_states[ACPI_STATE_S0] = TRUE; for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT, __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) && ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) acpi_sleep_states[state] = TRUE; /* * Dispatch the default sleep state to devices. The lid switch is set * to UNKNOWN by default to avoid surprising users. */ sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ? ACPI_STATE_S5 : ACPI_STATE_UNKNOWN; sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN; sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ? ACPI_STATE_S1 : ACPI_STATE_UNKNOWN; sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ? ACPI_STATE_S3 : ACPI_STATE_UNKNOWN; /* Pick the first valid sleep state for the sleep button default. */ sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN; for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) if (acpi_sleep_states[state]) { sc->acpi_sleep_button_sx = state; break; } acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children. */ /* Register our shutdown handler. */ EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST + 150); /* * Register our acpi event handlers. * XXX should be configurable e.g. via userland policy manager. */ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* Flag our initial states.
*/ sc->acpi_enabled = TRUE; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = TRUE; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, "acpi"); sc->acpi_dev_t->si_drv1 = sc; if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); acpi_platform_osc(dev); if (!acpi_disabled("bus")) { EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); acpi_probe_children(dev); } /* Update all GPEs and enable runtime GPEs. */ status = AcpiUpdateAllGpes(); if (ACPI_FAILURE(status)) device_printf(dev, "Could not update all GPEs: %s\n", AcpiFormatException(status)); /* Allow sleep request after a while. */ callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, acpi_sleep_enable, sc); error = 0; out: return_VALUE (error); } static void acpi_set_power_children(device_t dev, int state) { device_t child; device_t *devlist; int dstate, i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; /* * Retrieve and set D-state for the sleep state if _SxD is present. * Skip children who aren't attached since they are handled separately. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; dstate = state; if (device_is_attached(child) && acpi_device_pwr_for_sleep(dev, child, &dstate) == 0) acpi_set_powerstate(child, dstate); } free(devlist, M_TEMP); } static int acpi_suspend(device_t dev) { int error; bus_topo_assert(); error = bus_generic_suspend(dev); if (error == 0) acpi_set_power_children(dev, ACPI_STATE_D3); return (error); } static int acpi_resume(device_t dev) { bus_topo_assert(); acpi_set_power_children(dev, ACPI_STATE_D0); return (bus_generic_resume(dev)); } static int acpi_shutdown(device_t dev) { bus_topo_assert(); /* Allow children to shutdown first. */ bus_generic_shutdown(dev); /* * Enable any GPEs that are able to power-on the system (i.e., RTC). * Also, disable any that are not valid for this state (most). */ acpi_wake_prep_walk(ACPI_STATE_S5); return (0); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) return (NULL); resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); else free(ad, M_ACPIDEV); return (child); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += bus_print_child_domain(bus, child); retval += bus_print_child_footer(bus, child); return (retval); } /* * If this device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. 
* * XXX Disabled for now since many necessary devices (like fdc and * ATA) don't claim the devices we created for them but still expect * them to be powered up. */ static void acpi_probe_nomatch(device_t bus, device_t child) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D3); #endif } /* * If a new driver has a chance to probe a child, first power it up. * * XXX Disabled for now (see acpi_probe_nomatch for details). */ static void acpi_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs)) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); #else device_probe_and_attach(child); #endif } } free(devlist, M_TEMP); } /* Location hint for devctl(8) */ static int acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); int pxm; if (dinfo->ad_handle) { sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { sbuf_printf(sb, " _PXM=%d", pxm); } } return (0); } /* PnP information for devctl(8) */ int acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb) { ACPI_DEVICE_INFO *adinfo; if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { sbuf_printf(sb, "unknown"); return (0); } sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s", (adinfo->Valid & ACPI_VALID_HID) ? adinfo->HardwareId.String : "none", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL, ((adinfo->Valid & ACPI_VALID_CID) && adinfo->CompatibleIdList.Count > 0) ? adinfo->CompatibleIdList.Ids[0].String : "none"); AcpiOsFree(adinfo); return (0); } static int acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); return (acpi_pnpinfo(dinfo->ad_handle, sb)); } /* * Note: the check for the ACPI locator may be redundant. However, this routine * is suitable both for busses whose only locator is ACPI and as a building * block for busses that have to cope with multiple locators. */ int acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) { ACPI_HANDLE *handle = acpi_get_handle(child); if (handle != NULL) sbuf_printf(sb, "%s", acpi_name(handle)); return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } static int acpi_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) return (acpi_get_acpi_device_path(bus, child, locator, sb)); if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { ACPI_DEVICE_INFO *adinfo; if (!ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo)) && dinfo->ad_handle != 0 && (adinfo->Valid & ACPI_VALID_HID)) { const char *hid = adinfo->HardwareId.String; u_long uid = (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL; u_long hidval; /* * In UEFI Standard Version 2.6, Section 9.6.1.6 Text * Device Node Reference, there's an insanely long Table * 98. This implements the relevant bits from that * table. Newer versions do not appear to require * anything new.
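 * As a concrete example of the mapping implemented below, an ACPI
 * node with _HID PNP0501 and _UID 1 is rendered as "Serial(0x1)",
 * and both PNP0A03 and PNP0A08 are rendered as "PciRoot(uid)".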
The EDK2 firmware presents both PciRoot * and PcieRoot as PciRoot. Follow the EDK2 standard. */ if (strncmp("PNP", hid, 3) != 0) goto nomatch; hidval = strtoul(hid + 3, NULL, 16); switch (hidval) { case 0x0301: sbuf_printf(sb, "Keyboard(0x%lx)", uid); break; case 0x0401: sbuf_printf(sb, "ParallelPort(0x%lx)", uid); break; case 0x0501: sbuf_printf(sb, "Serial(0x%lx)", uid); break; case 0x0604: sbuf_printf(sb, "Floppy(0x%lx)", uid); break; case 0x0a03: case 0x0a08: sbuf_printf(sb, "PciRoot(0x%lx)", uid); break; default: /* Everything else gets a generic encode */ nomatch: sbuf_printf(sb, "Acpi(%s,0x%lx)", hid, uid); break; } } /* Not handled: AcpiAdr... unsure how to know it's one */ } /* For the rest, punt to the default handler */ return (bus_generic_get_device_path(bus, child, locator, sb)); } /* * Handle device deletion. */ static void acpi_child_deleted(device_t dev, device_t child) { struct acpi_device *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ad_handle) == child) AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ACPI_IVAR_FLAGS: *(int *)result = ad->ad_flags; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; case PCI_IVAR_CLASS: *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; break; case PCI_IVAR_SUBCLASS: *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; break; case PCI_IVAR_PROGIF: *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; case ACPI_IVAR_FLAGS: ad->ad_flags = (int)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static struct resource_list * acpi_get_rlist(device_t dev, device_t child) { struct acpi_device *ad; ad = device_get_ivars(child); return (&ad->ad_rl); } static int acpi_match_resource_hint(device_t dev, int type, long value) { struct acpi_device *ad = device_get_ivars(dev); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->start <= value && rle->end >= value) return (1); } return (0); } /* * Does this device match because the resources match? */ static bool acpi_hint_device_matches_resources(device_t child, const char *name, int unit) { long value; bool matches; /* * Check for matching resources. We must have at least one match. * Since I/O and memory resources cannot be shared, if we get a * match on either of those, ignore any mismatches in IRQs or DRQs. * * XXX: We may want to revisit this to be more lenient and wire * as long as it gets one match. 
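 *
 * For example (a sketch; the device and resource values are
 * hypothetical), the hints
 *	hint.uart.0.at="acpi0"
 *	hint.uart.0.port="0x3F8"
 * would wire unit 0 of uart to an ACPI node whose current resources
 * include I/O port 0x3f8.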
*/ matches = false; if (resource_long_value(name, unit, "port", &value) == 0) { /* * Floppy drive controllers are notorious for having a * wide variety of resources not all of which include the * first port that is specified by the hint (typically * 0x3f0) (see the comment above fdc_isa_alloc_resources() * in fdc_isa.c). However, they do all seem to include * port + 2 (e.g. 0x3f2) so for a floppy device, look for * 'value + 2' in the port resources instead of the hint * value. */ if (strcmp(name, "fdc") == 0) value += 2; if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) matches = true; else return false; } if (resource_long_value(name, unit, "maddr", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) matches = true; else return false; } /* * If either the I/O address or the memory address matched, then * assume this device matches and that any mismatch in other resources * will be resolved by silently ignoring those other resources. Otherwise * all further resources must match. */ if (matches) { return (true); } if (resource_long_value(name, unit, "irq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) matches = true; else return false; } if (resource_long_value(name, unit, "drq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) matches = true; else return false; } return matches; } /* * Wire device unit numbers based on resource matches in hints. */ static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp) { device_location_cache_t *cache; const char *s; int line, unit; bool matches; /* * Iterate over all the hints for the devices with the specified * name to see if one's resources are a subset of this device's. */ line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { /* Must have an "at" for acpi or isa. */ resource_string_value(name, unit, "at", &s); matches = false; if (strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0) matches = acpi_hint_device_matches_resources(child, name, unit); else matches = dev_wired_cache_match(cache, child, s); if (matches) { /* We have a winner! */ *unitp = unit; break; } } dev_wired_cache_fini(cache); } /* * Fetch the NUMA domain for a device by mapping the value returned by * _PXM to a NUMA domain. If the device does not have a _PXM method, * -2 is returned. If any other error occurs, -1 is returned.
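 * Callers such as acpi_get_domain() below treat -2 as "no _PXM here,
 * ask the parent bus" and -1 as a hard failure (ENOENT).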
*/ static int acpi_parse_pxm(device_t dev) { #ifdef NUMA #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) ACPI_HANDLE handle; ACPI_STATUS status; int pxm; handle = acpi_get_handle(dev); if (handle == NULL) return (-2); status = acpi_GetInteger(handle, "_PXM", &pxm); if (ACPI_SUCCESS(status)) return (acpi_map_pxm_to_vm_domainid(pxm)); if (status == AE_NOT_FOUND) return (-2); #endif #endif return (-1); } int acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { int d, error; d = acpi_parse_pxm(child); if (d < 0) return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); switch (op) { case LOCAL_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = cpuset_domain[d]; return (0); case INTR_CPUS: error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); if (error != 0) return (error); if (setsize != sizeof(cpuset_t)) return (EINVAL); CPU_AND(cpuset, cpuset, &cpuset_domain[d]); return (0); default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } } /* * Fetch the NUMA domain for the given device 'dev'. * * If a device has a _PXM method, map that to a NUMA domain. * Otherwise, pass the request up to the parent. * If there's no matching domain or the domain cannot be * determined, return ENOENT. */ int acpi_get_domain(device_t dev, device_t child, int *domain) { int d; d = acpi_parse_pxm(child); if (d >= 0) { *domain = d; return (0); } if (d == -1) return (ENOENT); /* No _PXM node; go up a level */ return (bus_generic_get_domain(dev, child, domain)); } static struct rman * acpi_get_rman(device_t bus, int type, u_int flags) { /* Only memory and IO resources are managed. */ switch (type) { case SYS_RES_IOPORT: return (&acpi_rman_io); case SYS_RES_MEMORY: return (&acpi_rman_mem); default: return (NULL); } } /* * Pre-allocate/manage all memory and IO resources. Since rman can't handle * duplicates, we merge any in the sysresource attach routine. */ static int acpi_sysres_alloc(device_t dev) { struct acpi_softc *sc = device_get_softc(dev); struct resource *res; struct resource_list_entry *rle; struct rman *rm; device_t *children; int child_count, i; /* * Probe/attach any sysresource devices. This would be unnecessary if we * had multi-pass probe/attach. */ if (device_get_children(dev, &children, &child_count) != 0) return (ENXIO); for (i = 0; i < child_count; i++) { if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) device_probe_and_attach(children[i]); } free(children, M_TEMP); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->res != NULL) { device_printf(dev, "duplicate resource for %jx\n", rle->start); continue; } /* Only memory and IO resources are valid here. */ rm = acpi_get_rman(dev, rle->type, 0); if (rm == NULL) continue; /* Pre-allocate resource and add to our rman pool. */ res = bus_alloc_resource(dev, rle->type, &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, RF_ACTIVE | RF_UNMAPPED); if (res != NULL) { rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); rle->res = res; } else if (bootverbose) device_printf(dev, "reservation of %jx, %jx (%d) failed\n", rle->start, rle->count, rle->type); } return (0); } /* * Reserve declared resources for active devices found during the * namespace scan once the boot-time attach of devices has completed. * * Ideally reserving firmware-assigned resources would work in a * depth-first traversal of the device namespace, but this is * complicated. In particular, not all resources are enumerated by * ACPI (e.g. 
PCI bridges and devices enumerate their resources via * other means). Some systems also enumerate devices via ACPI behind * PCI bridges but without a matching PCI device_t enumerated via * PCI bus scanning; such device_t's end up as direct children of * acpi0. Doing this scan late is not ideal, but works for now. */ static void acpi_reserve_resources(device_t dev) { struct resource_list_entry *rle; struct resource_list *rl; struct acpi_device *ad; device_t *children; int child_count, i; if (device_get_children(dev, &children, &child_count) != 0) return; for (i = 0; i < child_count; i++) { ad = device_get_ivars(children[i]); rl = &ad->ad_rl; /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) continue; STAILQ_FOREACH(rle, rl, link) { /* * Don't reserve IRQ resources. There are many sticky things * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET * when using legacy routing). */ if (rle->type == SYS_RES_IRQ) continue; /* * Don't reserve the resource if it is already allocated. * The acpi_ec(4) driver can allocate its resources early * if ECDT is present. */ if (rle->res != NULL) continue; /* * Try to reserve the resource from our parent. If this * fails because the resource is a system resource, just * let it be. The resource range is already reserved so * that other devices will not use it. If the driver * needs to allocate the resource, then * acpi_alloc_resource() will sub-alloc from the system * resource. */ resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid, rle->start, rle->end, rle->count, 0); } } free(children, M_TEMP); } static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; rman_res_t end; #ifdef INTRNG /* map with default for now */ if (type == SYS_RES_IRQ) start = (rman_res_t)acpi_map_intr(child, (u_int)start, acpi_get_handle(child)); #endif /* If the resource is already allocated, fail. */ if (resource_list_busy(rl, type, rid)) return (EBUSY); /* If the resource is already reserved, release it. */ if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, dev, child, type, rid); /* Add the resource. */ end = (start + count - 1); resource_list_add(rl, type, rid, start, end, count); return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifndef INTRNG ACPI_RESOURCE ares; #endif struct acpi_device *ad; struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, * use resource_list_alloc() to handle reserved resources. For * other devices, pass the request up to our parent. */ if (bus == device_get_parent(child)) { ad = device_get_ivars(child); rl = &ad->ad_rl; /* * Simulate the behavior of the ISA bus for direct children * devices. That is, if a non-default range is specified for * a resource that doesn't exist, use bus_set_resource() to * add the resource before allocating it. Note that these * resources will not be reserved.
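 * For example (a sketch), a request like
 *	bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0x60, 0x60, 1, 0)
 * against a rid with no existing resource list entry first adds the
 * 0x60 window via resource_list_add() and then allocates it.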
*/ if (!isdefault && resource_list_find(rl, type, *rid) == NULL) resource_list_add(rl, type, *rid, start, end, count); res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); #ifndef INTRNG if (res != NULL && type == SYS_RES_IRQ) { /* * Since bus_config_intr() takes immediate effect, we cannot * configure the interrupt associated with a device when we * parse the resources but have to defer it until a driver * actually allocates the interrupt via bus_alloc_resource(). * * XXX: Should we handle the lookup failing? */ if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) acpi_config_intr(child, &ares); } #endif /* * If this is an allocation of the "default" range for a given * RID, fetch the exact bounds for this resource from the * resource list entry to try to allocate the range from the * system resource regions. */ if (res == NULL && isdefault) { rle = resource_list_find(rl, type, *rid); if (rle != NULL) { start = rle->start; end = rle->end; count = rle->count; } } } else res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); /* * If the first attempt failed and this is an allocation of a * specific range, try to satisfy the request via a suballocation * from our system resource regions. */ if (res == NULL && start + count - 1 == end) res = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags); return (res); } static bool acpi_is_resource_managed(device_t bus, struct resource *r) { struct rman *rm; rm = acpi_get_rman(bus, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (false); return (rman_is_region_manager(r, rm)); } static struct resource * acpi_managed_resource(device_t bus, struct resource *r) { struct acpi_softc *sc = device_get_softc(bus); struct resource_list_entry *rle; KASSERT(acpi_is_resource_managed(bus, r), ("resource %p is not suballocated", r)); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->type != rman_get_type(r) || rle->res == NULL) continue; if (rman_get_start(r) >= rman_get_start(rle->res) && rman_get_end(r) <= rman_get_end(rle->res)) return (rle->res); } return (NULL); } static int acpi_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (acpi_is_resource_managed(bus, r)) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(bus, child, r, start, end)); } static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* * If this resource belongs to one of our internal managers, * deactivate it and release it to the local pool. 
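 * ("Managed" here means the resource was suballocated from the
 * acpi_rman_io or acpi_rman_mem pools returned by acpi_get_rman();
 * anything else is handed back to the generic resource-list code.)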
*/ if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_release_resource(bus, child, type, rid, r)); return (bus_generic_rl_release_resource(bus, child, type, rid, r)); } static void acpi_delete_resource(device_t bus, device_t child, int type, int rid) { struct resource_list *rl; rl = acpi_get_rlist(bus, child); if (resource_list_busy(rl, type, rid)) { device_printf(bus, "delete_resource: Resource still owned by child" " (type=%d, rid=%d)\n", type, rid); return; } if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, bus, child, type, rid); resource_list_delete(rl, type, rid); } static int -acpi_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +acpi_activate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) - return (bus_generic_rman_activate_resource(bus, child, type, - rid, r)); - return (bus_generic_activate_resource(bus, child, type, rid, r)); + return (bus_generic_rman_activate_resource(bus, child, r)); + return (bus_generic_activate_resource(bus, child, r)); } static int -acpi_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +acpi_deactivate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) - return (bus_generic_rman_deactivate_resource(bus, child, type, - rid, r)); - return (bus_generic_deactivate_resource(bus, child, type, rid, r)); + return (bus_generic_rman_deactivate_resource(bus, child, r)); + return (bus_generic_deactivate_resource(bus, child, r)); } static int acpi_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct resource *sysres; rman_res_t length, start; int error; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_map_resource(bus, child, r, argsp, map)); /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sysres = acpi_managed_resource(bus, r); if (sysres == NULL) return (ENOENT); args.offset = start - rman_get_start(sysres); args.length = length; return (bus_generic_map_resource(bus, child, sysres, &args, map)); } static int acpi_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { if (acpi_is_resource_managed(bus, r)) { r = acpi_managed_resource(bus, r); if (r == NULL) return (ENOENT); } return (bus_generic_unmap_resource(bus, child, r, map)); } /* Allocate an IO port or memory resource, given its GAS. */ int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags) { int error, res_type; error = ENOMEM; if (type == NULL || rid == NULL || gas == NULL || res == NULL) return (EINVAL); /* We only support memory and IO spaces. */ switch (gas->SpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: res_type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: res_type = SYS_RES_IOPORT; break; default: return (EOPNOTSUPP); } /* * If the register width is less than 8, assume the BIOS author means * it is a bit field and just allocate a byte. */ if (gas->BitWidth && gas->BitWidth < 8) gas->BitWidth = 8; /* Validate the address after we're sure we support the space. 
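 * (Illustrative example: a GAS with SpaceId ACPI_ADR_SPACE_SYSTEM_IO,
 * Address 0xb2 and BitWidth 8 becomes a one-byte SYS_RES_IOPORT
 * resource at port 0xb2 below.)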
*/ if (gas->Address == 0 || gas->BitWidth == 0) return (EINVAL); bus_set_resource(dev, res_type, *rid, gas->Address, gas->BitWidth / 8); *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); if (*res != NULL) { *type = res_type; error = 0; } else bus_delete_resource(dev, res_type, *rid); return (error); } /* Probe _HID and _CID for compatible ISA PNP ids. */ static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; uint32_t pnpid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch and validate the HID. */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? PNP_EISAID(devinfo->HardwareId.String) : 0; AcpiOsFree(devinfo); return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_PNP_DEVICE_ID *ids; ACPI_HANDLE h; uint32_t *pnpid; int i, valid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); if ((devinfo->Valid & ACPI_VALID_CID) == 0) { AcpiOsFree(devinfo); return_VALUE (0); } if (devinfo->CompatibleIdList.Count < count) count = devinfo->CompatibleIdList.Count; ids = devinfo->CompatibleIdList.Ids; for (i = 0, valid = 0; i < count; i++) if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && strncmp(ids[i].String, "PNP", 3) == 0) { *pnpid++ = PNP_EISAID(ids[i].String); valid++; } AcpiOsFree(devinfo); return_VALUE (valid); } static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; int rv; int i; h = acpi_get_handle(dev); if (ids == NULL || h == NULL) return (ENXIO); t = acpi_get_type(dev); if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) return (ENXIO); /* Try to match one of the array of IDs with a HID or CID. */ for (i = 0; ids[i] != NULL; i++) { rv = acpi_MatchHid(h, ids[i]); if (rv == ACPI_MATCHHID_NOMATCH) continue; if (match != NULL) { *match = ids[i]; } return ((rv == ACPI_MATCHHID_HID)? 
BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); } return (ENXIO); } static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) { ACPI_HANDLE h; if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); return (AcpiEvaluateObject(h, pathname, parameters, ret)); } static ACPI_STATUS acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { const ACPI_OBJECT *pkg, *name, *val; struct acpi_device *ad; ACPI_STATUS status; int i; ad = device_get_ivars(dev); if (ad == NULL || propname == NULL) return (AE_BAD_PARAMETER); if (ad->dsd_pkg == NULL) { if (ad->dsd.Pointer == NULL) { status = acpi_find_dsd(ad); if (ACPI_FAILURE(status)) return (status); } else { return (AE_NOT_FOUND); } } for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) { pkg = &ad->dsd_pkg->Package.Elements[i]; if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2) continue; name = &pkg->Package.Elements[0]; val = &pkg->Package.Elements[1]; if (name->Type != ACPI_TYPE_STRING) continue; if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) { if (value != NULL) *value = val; return (AE_OK); } } return (AE_NOT_FOUND); } static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad) { const ACPI_OBJECT *dsd, *guid, *pkg; ACPI_STATUS status; ad->dsd.Length = ACPI_ALLOCATE_BUFFER; ad->dsd.Pointer = NULL; ad->dsd_pkg = NULL; status = AcpiEvaluateObject(ad->ad_handle, "_DSD", NULL, &ad->dsd); if (ACPI_FAILURE(status)) return (status); dsd = ad->dsd.Pointer; guid = &dsd->Package.Elements[0]; pkg = &dsd->Package.Elements[1]; if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE || guid->Buffer.Length != sizeof(acpi_dsd_uuid)) return (AE_NOT_FOUND); if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid, sizeof(acpi_dsd_uuid)) == 0) { ad->dsd_pkg = pkg; return (AE_OK); } return (AE_NOT_FOUND); } static ssize_t acpi_bus_get_prop_handle(const ACPI_OBJECT *hobj, void *propvalue, size_t size) { ACPI_OBJECT *pobj; ACPI_HANDLE h; if (hobj->Type != ACPI_TYPE_PACKAGE) goto err; if (hobj->Package.Count != 1) goto err; pobj = &hobj->Package.Elements[0]; if (pobj == NULL) goto err; if (pobj->Type != ACPI_TYPE_LOCAL_REFERENCE) goto err; h = acpi_GetReference(NULL, pobj); if (h == NULL) goto err; if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) *(ACPI_HANDLE *)propvalue = h; return (sizeof(ACPI_HANDLE)); err: return (-1); } static ssize_t acpi_bus_get_prop(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { ACPI_STATUS status; const ACPI_OBJECT *obj; status = acpi_device_get_prop(bus, child, __DECONST(char *, propname), &obj); if (ACPI_FAILURE(status)) return (-1); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: break; case DEVICE_PROP_HANDLE: return (acpi_bus_get_prop_handle(obj, propvalue, size)); default: return (-1); } switch (obj->Type) { case ACPI_TYPE_INTEGER: if (type == DEVICE_PROP_UINT32) { if (propvalue != NULL && size >= sizeof(uint32_t)) *((uint32_t *)propvalue) = obj->Integer.Value; return (sizeof(uint32_t)); } if (propvalue != NULL && size >= sizeof(uint64_t)) *((uint64_t *) propvalue) = obj->Integer.Value; return (sizeof(uint64_t)); case ACPI_TYPE_STRING: if (type != DEVICE_PROP_ANY && type != DEVICE_PROP_BUFFER) return (-1); if (propvalue != NULL && size > 0) memcpy(propvalue, obj->String.Pointer, MIN(size, 
obj->String.Length)); return (obj->String.Length); case ACPI_TYPE_BUFFER: if (propvalue != NULL && size > 0) memcpy(propvalue, obj->Buffer.Pointer, MIN(size, obj->Buffer.Length)); return (obj->Buffer.Length); case ACPI_TYPE_PACKAGE: if (propvalue != NULL && size >= sizeof(ACPI_OBJECT *)) { *((ACPI_OBJECT **) propvalue) = __DECONST(ACPI_OBJECT *, obj); } return (sizeof(ACPI_OBJECT *)); case ACPI_TYPE_LOCAL_REFERENCE: if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) { ACPI_HANDLE h; h = acpi_GetReference(NULL, __DECONST(ACPI_OBJECT *, obj)); memcpy(propvalue, h, sizeof(ACPI_HANDLE)); } return (sizeof(ACPI_HANDLE)); default: return (0); } } int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) { struct acpi_softc *sc; ACPI_HANDLE handle; ACPI_STATUS status; char sxd[8]; handle = acpi_get_handle(dev); /* * XXX If we find these devices, don't try to power them down. * The serial and IRDA ports on my T23 hang the system when * set to D3 and it appears that such legacy devices may * need special handling in their drivers. */ if (dstate == NULL || handle == NULL || acpi_MatchHid(handle, "PNP0500") || acpi_MatchHid(handle, "PNP0501") || acpi_MatchHid(handle, "PNP0502") || acpi_MatchHid(handle, "PNP0510") || acpi_MatchHid(handle, "PNP0511")) return (ENXIO); /* * Override next state with the value from _SxD, if present. * Note illegal _S0D is evaluated because some systems expect this. */ sc = device_get_softc(bus); snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); status = acpi_GetInteger(handle, sxd, dstate); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { device_printf(dev, "failed to get %s on %s: %s\n", sxd, acpi_name(handle), AcpiFormatException(status)); return (ENXIO); } return (0); } /* Callback arg for our implementation of walking the namespace. */ struct acpi_device_scan_ctx { acpi_scan_cb_t user_fn; void *arg; ACPI_HANDLE parent; }; static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) { struct acpi_device_scan_ctx *ctx; device_t dev, old_dev; ACPI_STATUS status; ACPI_OBJECT_TYPE type; /* * Skip this device if we think we'll have trouble with it or it is * the parent where the scan began. */ ctx = (struct acpi_device_scan_ctx *)arg; if (acpi_avoid(h) || h == ctx->parent) return (AE_OK); /* If this is not a valid device type (e.g., a method), skip it. */ if (ACPI_FAILURE(AcpiGetType(h, &type))) return (AE_OK); if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) return (AE_OK); /* * Call the user function with the current device. If it is unchanged * afterwards, return. Otherwise, we update the handle to the new dev. */ old_dev = acpi_get_device(h); dev = old_dev; status = ctx->user_fn(h, &dev, level, ctx->arg); if (ACPI_FAILURE(status) || old_dev == dev) return (status); /* Remove the old child and its connection to the handle. */ if (old_dev != NULL) device_delete_child(device_get_parent(old_dev), old_dev); /* Recreate the handle association if the user created a device. 
*/ if (dev != NULL) AcpiAttachData(h, acpi_fake_objhandler, dev); return (AE_OK); } static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg) { ACPI_HANDLE h; struct acpi_device_scan_ctx ctx; if (acpi_disabled("children")) return (AE_OK); if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); ctx.user_fn = user_fn; ctx.arg = arg; ctx.parent = h; return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, acpi_device_scan_cb, NULL, &ctx, NULL)); } /* * Even though ACPI devices are not PCI, we use the PCI approach for setting * device power states since it's close enough to ACPI. */ int acpi_set_powerstate(device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; h = acpi_get_handle(child); if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) return (EINVAL); if (h == NULL) return (0); /* Ignore errors if the power methods aren't present. */ status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(child, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(child, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); return (0); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. */ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: if (result == 0 && ids->ip_desc) device_set_desc(child, ids->ip_desc); return_VALUE (result); } /* * Look for a MCFG table. If it is present, use the settings for * domain (segment) 0 to setup PCI config space access via the memory * map. * * On non-x86 architectures (arm64 for now), this will be done from the * PCI host bridge driver. 
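 *
 * Each MCFG allocation describes one ECAM window; under the standard
 * PCIe layout (a sketch of the convention, not code from this file)
 * the config space of a bus/slot/function lives at
 *	Address + ((bus << 20) | (slot << 15) | (func << 12))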
*/ static void acpi_enable_pcie(void) { #if defined(__i386__) || defined(__amd64__) ACPI_TABLE_HEADER *hdr; ACPI_MCFG_ALLOCATION *alloc, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); if (ACPI_FAILURE(status)) return; end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); while (alloc < end) { pcie_cfgregopen(alloc->Address, alloc->PciSegment, alloc->StartBusNumber, alloc->EndBusNumber); alloc++; } #endif } static void acpi_platform_osc(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; uint32_t cap_set[2]; /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 }; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) return; cap_set[1] = 0x10; /* APEI Support */ status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) return; device_printf(dev, "_OSC failed: %s\n", AcpiFormatException(status)); return; } } /* * Scan all of the ACPI namespace and attach child devices. * * We should only expect to find devices in the \_PR, \_TZ, \_SI, and * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. * However, in violation of the spec, some systems place their PCI link * devices in \, so we have to walk the whole namespace. We check the * type of namespace nodes, so this should be ok. */ static void acpi_probe_children(device_t bus) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Scan the namespace and insert placeholders for all the devices that * we find. We also probe/attach any early devices. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. (This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, NULL, bus, NULL); /* Pre-allocate resources for our rman from any sysresource devices. */ acpi_sysres_alloc(bus); /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_generic_probe(bus); /* Probe/attach all children, created statically and from the namespace. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n")); bus_generic_attach(bus); /* * Reserve resources allocated to children but not yet allocated * by a driver. */ acpi_reserve_resources(bus); /* Attach wake sysctls. */ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Determine the probe order for a given device. */ static void acpi_probe_order(ACPI_HANDLE handle, int *order) { ACPI_OBJECT_TYPE type; /* * 0. CPUs * 1. I/O port and memory system resource holders * 2. Clocks and timers (to handle early accesses) * 3. Embedded controllers (to handle early accesses) * 4. 
PCI Link Devices */ AcpiGetType(handle, &type); if (type == ACPI_TYPE_PROCESSOR) *order = 0; else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) *order = 1; else if (acpi_MatchHid(handle, "PNP0100") || acpi_MatchHid(handle, "PNP0103") || acpi_MatchHid(handle, "PNP0B00")) *order = 2; else if (acpi_MatchHid(handle, "PNP0C09")) *order = 3; else if (acpi_MatchHid(handle, "PNP0C0F")) *order = 4; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO *devinfo; struct acpi_device *ad; struct acpi_prw_data prw; ACPI_OBJECT_TYPE type; ACPI_HANDLE h; device_t bus, child; char *handle_str; int order; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("children")) return_ACPI_STATUS (AE_OK); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); bus = (device_t)context; if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { handle_str = acpi_name(handle); switch (type) { case ACPI_TYPE_DEVICE: /* * Since we scan from \, be sure to skip system scope objects. * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run * during initialization and \_TZ_ is to support Notify() on it. */ if (strcmp(handle_str, "\\_SB_") == 0 || strcmp(handle_str, "\\_TZ_") == 0) break; if (acpi_parse_prw(handle, &prw) == 0) AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); /* * Ignore devices that do not have a _HID or _CID. They should * be discovered by other buses (e.g. the PCI bus driver). */ if (!acpi_has_hid(handle)) break; /* FALLTHROUGH */ case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: /* * Create a placeholder device for this node. Sort the * placeholder so that the probe/attach passes will run * breadth-first. Orders less than ACPI_DEV_BASE_ORDER * are reserved for special objects (i.e., system * resources). */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); order = level * 10 + ACPI_DEV_BASE_ORDER; acpi_probe_order(handle, &order); child = BUS_ADD_CHILD(bus, order, NULL, -1); if (child == NULL) break; /* Associate the handle with the device_t and vice versa. */ acpi_set_handle(child, handle); AcpiAttachData(handle, acpi_fake_objhandler, child); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). * * XXX PCI link devices sometimes report "present" but not * "functional" (i.e. if disabled). Go ahead and probe them * anyway since we may enable them later. */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { /* Never disable PCI link devices. */ if (acpi_MatchHid(handle, "PNP0C0F")) break; /* * The RTC device should be enabled for CMOS register space * unless the FADT indicates it is not present * (checked in the RTC probe routine). */ if (acpi_MatchHid(handle, "PNP0B00")) break; /* * Docking stations should remain enabled since the system * may be undocked at boot. */ if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) break; device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources.
*/ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); ad = device_get_ivars(child); ad->ad_cls_class = 0xffffff; if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { ad->ad_cls_class = strtoul(devinfo->ClassCode.String, NULL, 16); } AcpiOsFree(devinfo); } break; } } return_ACPI_STATUS (AE_OK); } /* * AcpiAttachData() requires an object handler but never uses it. This is a * placeholder object handler so we can store a device_t in an ACPI_HANDLE. */ void acpi_fake_objhandler(ACPI_HANDLE h, void *data) { } static void acpi_shutdown_final(void *arg, int howto) { struct acpi_softc *sc = (struct acpi_softc *)arg; register_t intr; ACPI_STATUS status; /* * XXX Shutdown code should only run on the BSP (cpuid 0). * Some chipsets do not power off the system correctly if called from * an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } device_printf(sc->acpi_dev, "Powering system off\n"); intr = intr_disable(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - timeout\n"); } } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { /* Reboot using the reset register. */ status = AcpiReset(); if (ACPI_SUCCESS(status)) { DELAY(1000000); device_printf(sc->acpi_dev, "reset failed - timeout\n"); } else if (status != AE_NOT_EXIST) device_printf(sc->acpi_dev, "reset failed - %s\n", AcpiFormatException(status)); } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { /* * Only disable ACPI if the user requested it. On some systems, writing * the disable value to SMI_CMD hangs the system. */ device_printf(sc->acpi_dev, "Shutting down\n"); AcpiTerminate(); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; /* Enable and clear fixed events and install handlers. */ if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. */ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); #ifdef ACPI_EARLY_EPYC_WAR /* * Certain Threadripper boards always return 0 for FreeBSD because they * only return non-zero for the OS string "Windows 2015", and zero * otherwise. Force them to always be treated as present. * Beta versions were worse: they always returned 0.
*/ if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010")) return (TRUE); #endif status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if the battery is actually present and inserted. */ BOOLEAN acpi_BatteryIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if a device has at least one valid device ID. */ BOOLEAN acpi_has_hid(ACPI_HANDLE h) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (FALSE); ret = FALSE; if ((devinfo->Valid & ACPI_VALID_HID) != 0) ret = TRUE; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) if (devinfo->CompatibleIdList.Count > 0) ret = TRUE; AcpiOsFree(devinfo); return (ret); } /* * Match a HID string against a handle. * Returns ACPI_MATCHHID_HID on a _HID match, * ACPI_MATCHHID_CID on a _CID match without a _HID match, and * ACPI_MATCHHID_NOMATCH (0) if there is no match. */ int acpi_MatchHid(ACPI_HANDLE h, const char *hid) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; int i; if (hid == NULL || h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ACPI_MATCHHID_NOMATCH); ret = ACPI_MATCHHID_NOMATCH; if ((devinfo->Valid & ACPI_VALID_HID) != 0 && strcmp(hid, devinfo->HardwareId.String) == 0) ret = ACPI_MATCHHID_HID; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) { ret = ACPI_MATCHHID_CID; break; } } AcpiOsFree(devinfo); return (ret); } /* * Return the handle of a named object within our scope, i.e. that of (parent) * or one of its parents. */ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; /* Walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (ACPI_SUCCESS(status)) { *result = r; return (AE_OK); } /* XXX Return error here? */ if (status != AE_NOT_FOUND) return (AE_OK); if (ACPI_FAILURE(AcpiGetParent(parent, &r))) return (AE_NOT_FOUND); parent = r; } } ACPI_STATUS acpi_GetProperty(device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { device_t bus = device_get_parent(dev); return (ACPI_GET_PROPERTY(bus, dev, propname, value)); } /* * Allocate a buffer with a preset data size. */ ACPI_BUFFER * acpi_AllocBuffer(int size) { ACPI_BUFFER *buf; if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) return (NULL); buf->Length = size; buf->Pointer = (void *)(buf + 1); return (buf); } ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) { ACPI_OBJECT arg1; ACPI_OBJECT_LIST args; arg1.Type = ACPI_TYPE_INTEGER; arg1.Integer.Value = number; args.Count = 1; args.Pointer = &arg1; return (AcpiEvaluateObject(handle, path, &args, NULL)); } /* * Evaluate a path that should return an integer.
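 *
 * Typical usage (sketch):
 *	UINT32 sta;
 *
 *	if (ACPI_SUCCESS(acpi_GetInteger(handle, "_STA", &sta)))
 *		... use sta ...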
*/ ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT param; if (handle == NULL) handle = ACPI_ROOT_OBJECT; /* * Assume that what we've been pointed at is an Integer object, or * a method that will return an Integer. */ buf.Pointer = &param; buf.Length = sizeof(param); status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) { if (param.Type == ACPI_TYPE_INTEGER) *number = param.Integer.Value; else status = AE_TYPE; } /* * In some applications, a method that's expected to return an Integer * may instead return a Buffer (probably to simplify some internal * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, * convert it into an Integer as best we can. * * This is a hack. */ if (status == AE_BUFFER_OVERFLOW) { if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { status = AE_NO_MEMORY; } else { status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) status = acpi_ConvertBufferToInteger(&buf, number); AcpiOsFree(buf.Pointer); } } return (status); } ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) { ACPI_OBJECT *p; UINT8 *val; int i; p = (ACPI_OBJECT *)bufp->Pointer; if (p->Type == ACPI_TYPE_INTEGER) { *number = p->Integer.Value; return (AE_OK); } if (p->Type != ACPI_TYPE_BUFFER) return (AE_TYPE); if (p->Buffer.Length > sizeof(int)) return (AE_BAD_DATA); *number = 0; val = p->Buffer.Pointer; for (i = 0; i < p->Buffer.Length; i++) *number += val[i] << (i * 8); return (AE_OK); } /* * Iterate over the elements of a package object, calling the supplied * function for each element. * * XXX possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) return (AE_BAD_PARAMETER); /* Iterate over components */ i = 0; comp = pkg->Package.Elements; for (; i < pkg->Package.Count; i++, comp++) func(comp, arg); return (AE_OK); } /* * Find the (index)th resource object in a set. */ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* Range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); /* Check for terminator */ if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) return (AE_NOT_FOUND); rp = ACPI_NEXT_RESOURCE(rp); } if (resp != NULL) *resp = rp; return (AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator.
* This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. */ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RS_SIZE_NO_DATA + ACPI_RS_SIZE_MIN) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); /* And add the terminator. */ rp = ACPI_NEXT_RESOURCE(rp); rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; return (AE_OK); } UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) { /* * ACPI spec 9.1.1 defines this. * * "Arg2: Function Index Represents a specific function whose meaning is * specific to the UUID and Revision ID. Function indices should start * with 1. Function number zero is a query function (see the special * return code defined below)." */ ACPI_BUFFER buf; ACPI_OBJECT *obj; UINT64 ret = 0; int i; if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { ACPI_INFO(("Failed to enumerate DSM functions\n")); return (0); } obj = (ACPI_OBJECT *)buf.Pointer; KASSERT(obj, ("Object not allowed to be NULL\n")); /* * From ACPI 6.2 spec 9.1.1: * If Function Index = 0, a Buffer containing a function index bitfield. * Otherwise, the return value and type depends on the UUID and revision * ID (see below). */ switch (obj->Type) { case ACPI_TYPE_BUFFER: for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); break; case ACPI_TYPE_INTEGER: ACPI_BIOS_WARNING((AE_INFO, "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); ret = obj->Integer.Value; break; default: ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); }; AcpiOsFree(obj); return ret; } /* * DSM may return multiple types depending on the function. It is therefore * unsafe to use the typed evaluation. It is highly recommended that the caller * check the type of the returned object. 
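 *
 * Usage sketch (the UUID and function index are hypothetical):
 *	ACPI_BUFFER buf;
 *	ACPI_OBJECT *obj;
 *
 *	if (ACPI_SUCCESS(acpi_EvaluateDSM(handle, uuid, 1, 4, NULL, &buf))) {
 *		obj = (ACPI_OBJECT *)buf.Pointer;
 *		if (obj->Type == ACPI_TYPE_BUFFER)
 *			... consume obj->Buffer ...
 *		AcpiOsFree(buf.Pointer);
 *	}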
*/ ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf) { return (acpi_EvaluateDSMTyped(handle, uuid, revision, function, package, out_buf, ACPI_TYPE_ANY)); } ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type) { ACPI_OBJECT arg[4]; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; if (out_buf == NULL) return (AE_NO_MEMORY); arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid); arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = function; if (package) { arg[3] = *package; } else { arg[3].Type = ACPI_TYPE_PACKAGE; arg[3].Package.Count = 0; arg[3].Package.Elements = NULL; } arglist.Pointer = arg; arglist.Count = 4; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type); if (ACPI_FAILURE(status)) return (status); KASSERT(ACPI_SUCCESS(status), ("Unexpected status")); *out_buf = buf; return (status); } ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query) { ACPI_OBJECT arg[4], *ret; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; arglist.Pointer = arg; arglist.Count = 4; arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = uuid; arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = count; arg[3].Type = ACPI_TYPE_BUFFER; arg[3].Buffer.Length = count * sizeof(*caps_in); arg[3].Buffer.Pointer = (uint8_t *)caps_in; caps_in[0] = query ? 1 : 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return (status); if (caps_out != NULL) { ret = buf.Pointer; if (ret->Buffer.Length != count * sizeof(*caps_out)) { AcpiOsFree(buf.Pointer); return (AE_BUFFER_OVERFLOW); } bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length); } AcpiOsFree(buf.Pointer); return (status); } /* * Set interrupt model. */ ACPI_STATUS acpi_SetIntrModel(int model) { return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); } /* * Walk subtables of a table and call a callback routine for each * subtable. The caller should provide the first subtable and a * pointer to the end of the table. This can be used to walk tables * such as MADT and SRAT that use subtable entries. */ void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg) { ACPI_SUBTABLE_HEADER *entry; for (entry = first; (void *)entry < end; ) { /* Avoid an infinite loop if we hit a bogus entry. */ if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER)) return; handler(entry, arg); entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length); } } /* * DEPRECATED. This interface has serious deficiencies and will be * removed. * * Immediately enter the sleep state. In the old model, acpiconf(8) ran * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 
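 * New callers should use acpi_ReqSleepState() below, which notifies
 * userland and collects acknowledgements before actually sleeping.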
*/ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { static int once; if (!once) { device_printf(sc->acpi_dev, "warning: acpi_SetSleepState() deprecated, need to update your software\n"); once = 1; } return (acpi_EnterSleepState(sc, state)); } #if defined(__amd64__) || defined(__i386__) static void acpi_sleep_force_task(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) device_printf(sc->acpi_dev, "force sleep state S%d failed\n", sc->acpi_next_sstate); } static void acpi_sleep_force(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; device_printf(sc->acpi_dev, "suspend request timed out, forcing sleep now\n"); /* * XXX Suspending from callout causes freezes in DEVICE_SUSPEND(). * Suspend from acpi_task thread instead. */ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_sleep_force_task, sc))) device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n"); } #endif /* * Request that the system enter the given suspend state. All /dev/apm * devices and devd(8) will be notified. Userland then has a chance to * save state and acknowledge the request. The system sleeps once all * acks are in. */ int acpi_ReqSleepState(struct acpi_softc *sc, int state) { #if defined(__amd64__) || defined(__i386__) struct apm_clone_data *clone; ACPI_STATUS status; if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); /* * If a reboot/shutdown/suspend request is already in progress or * suspend is blocked due to an upcoming shutdown, just return. */ if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) { return (0); } /* Wait until sleep is enabled. */ while (sc->acpi_sleep_disabled) { AcpiOsSleep(1000); } ACPI_LOCK(acpi); sc->acpi_next_sstate = state; /* S5 (soft-off) should be entered directly with no waiting. */ if (state == ACPI_STATE_S5) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* Record the pending state and notify all apm devices. */ STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { clone->notify_status = APM_EV_NONE; if ((clone->flags & ACPI_EVF_DEVD) == 0) { selwakeuppri(&clone->sel_read, PZERO); KNOTE_LOCKED(&clone->sel_read.si_note, 0); } } /* If devd(8) is not running, immediately enter the sleep state. */ if (!devctl_process_running()) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* * Set a timeout to fire if userland doesn't ack the suspend request * in time. This way we still eventually go to sleep if we were * overheating or running low on battery, even if userland is hung. * We cancel this timeout once all userland acks are in or the * suspend request is aborted. */ callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); ACPI_UNLOCK(acpi); /* Now notify devd(8) also. */ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); return (0); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } /* * Acknowledge (or reject) a pending sleep state. The caller has * prepared for suspend and is now ready for it to proceed. If the * error argument is non-zero, it indicates suspend should be cancelled * and gives an errno value describing why. Once all votes are in, * we suspend the system. 
*/ int acpi_AckSleepState(struct apm_clone_data *clone, int error) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc; int ret, sleeping; /* If no pending sleep state, return an error. */ ACPI_LOCK(acpi); sc = clone->acpi_sc; if (sc->acpi_next_sstate == 0) { ACPI_UNLOCK(acpi); return (ENXIO); } /* Caller wants to abort suspend process. */ if (error) { sc->acpi_next_sstate = 0; callout_stop(&sc->susp_force_to); device_printf(sc->acpi_dev, "listener on %s cancelled the pending suspend\n", devtoname(clone->cdev)); ACPI_UNLOCK(acpi); return (0); } /* * Mark this device as acking the suspend request. Then, walk through * all devices, seeing if they agree yet. We only count devices that * are writable since read-only devices couldn't ack the request. */ sleeping = TRUE; clone->notify_status = APM_EV_ACKED; STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { if ((clone->flags & ACPI_EVF_WRITE) != 0 && clone->notify_status != APM_EV_ACKED) { sleeping = FALSE; break; } } /* If all devices have voted "yes", we will suspend now. */ if (sleeping) callout_stop(&sc->susp_force_to); ACPI_UNLOCK(acpi); ret = 0; if (sleeping) { if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) ret = ENODEV; } return (ret); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } static void acpi_sleep_enable(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; ACPI_LOCK_ASSERT(acpi); /* Reschedule if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) { callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); return; } sc->acpi_sleep_disabled = FALSE; } static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc) { ACPI_STATUS status; /* Fail if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) return (AE_ERROR); ACPI_LOCK(acpi); status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK; sc->acpi_sleep_disabled = TRUE; ACPI_UNLOCK(acpi); return (status); } enum acpi_sleep_state { ACPI_SS_NONE, ACPI_SS_GPE_SET, ACPI_SS_DEV_SUSPEND, ACPI_SS_SLP_PREP, ACPI_SS_SLEPT, }; /* * Enter the desired system sleep state. * * Currently we support S1-S5 but S4 is only S4BIOS */ static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state) { register_t intr; ACPI_STATUS status; ACPI_EVENT_STATUS power_button_status; enum acpi_sleep_state slp_state; int sleep_result; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return_ACPI_STATUS (AE_BAD_PARAMETER); if (!acpi_sleep_states[state]) { device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", state); return (AE_SUPPORT); } /* Re-entry once we're suspending is not allowed. */ status = acpi_sleep_disable(sc); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "suspend request ignored (not ready yet)\n"); return (status); } if (state == ACPI_STATE_S5) { /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. 
*/ shutdown_nice(RB_POWEROFF); return_ACPI_STATUS (AE_OK); } EVENTHANDLER_INVOKE(power_suspend_early); stop_all_proc(); suspend_all_fs(); EVENTHANDLER_INVOKE(power_suspend); #ifdef EARLY_AP_STARTUP MPASS(mp_ncpus == 1 || smp_started); thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } #endif /* * Be sure to hold the bus topology lock across DEVICE_SUSPEND/RESUME */ bus_topo_lock(); slp_state = ACPI_SS_NONE; sc->acpi_sstate = state; /* Enable any GPEs as appropriate and requested by the user. */ acpi_wake_prep_walk(state); slp_state = ACPI_SS_GPE_SET; /* * Inform all devices that we are going to sleep. If at least one * device fails, DEVICE_SUSPEND() automatically resumes the tree. * * XXX Note that a better two-pass approach with a 'veto' pass * followed by a "real thing" pass would be better, but the current * bus interface does not provide for this. */ if (DEVICE_SUSPEND(root_bus) != 0) { device_printf(sc->acpi_dev, "device_suspend failed\n"); goto backout; } slp_state = ACPI_SS_DEV_SUSPEND; status = AcpiEnterSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); goto backout; } slp_state = ACPI_SS_SLP_PREP; if (sc->acpi_sleep_delay > 0) DELAY(sc->acpi_sleep_delay * 1000000); suspendclock(); intr = intr_disable(); if (state != ACPI_STATE_S1) { sleep_result = acpi_sleep_machdep(sc, state); acpi_wakeup_machdep(sc, state, sleep_result, 0); /* * XXX According to the ACPI specification, the SCI_EN bit should be restored * by the ACPI platform (BIOS, firmware) to its pre-sleep state. * Unfortunately some BIOSes fail to do that and that leads to * unexpected and serious consequences during wakeup, such as a system * getting stuck in SMI handlers. * This hack is picked up from Linux, which claims that it follows * Windows behavior. */ if (sleep_result == 1 && state != ACPI_STATE_S4) AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT); if (sleep_result == 1 && state == ACPI_STATE_S3) { /* * Prevent misinterpretation of the wakeup by the power button * as a request for power off. * Ideally we should post an appropriate wakeup event, * perhaps using acpi_event_power_button_wake or the like. * * Clearing of power button status after wakeup is mandated * by the ACPI specification in section "Fixed Power Button". * * XXX As of ACPICA 20121114 AcpiGetEventStatus provides * status as 0/1 corresponding to inactive/active despite * its type being ACPI_EVENT_STATUS. In other words, * we should not test for ACPI_EVENT_FLAG_SET for the time being. */ if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON, &power_button_status)) && power_button_status != 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); device_printf(sc->acpi_dev, "cleared fixed power button status\n"); } } intr_restore(intr); /* call acpi_wakeup_machdep() again with interrupts enabled */ acpi_wakeup_machdep(sc, state, sleep_result, 1); AcpiLeaveSleepStatePrep(state); if (sleep_result == -1) goto backout; /* Re-enable ACPI hardware on wakeup from sleep state 4. */ if (state == ACPI_STATE_S4) AcpiEnable(); } else { status = AcpiEnterSleepState(state); intr_restore(intr); AcpiLeaveSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); goto backout; } } slp_state = ACPI_SS_SLEPT; /* * Back out state according to how far along we got in the suspend * process.
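 *
 * The same graded-unwind idiom in miniature (an illustrative sketch,
 * not part of the original source): slp_state records the last setup
 * step that completed, and each cleanup below runs only if the
 * corresponding setup ran:
 *
 *	state = DID_NONE;
 *	if (step_a() != 0)
 *		goto backout;
 *	state = DID_A;
 *	if (step_b() != 0)
 *		goto backout;
 *	state = DID_B;
 * backout:
 *	if (state >= DID_B)
 *		undo_b();
 *	if (state >= DID_A)
 *		undo_a();
 *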
This handles both the error and success cases. */ backout: if (slp_state >= ACPI_SS_SLP_PREP) resumeclock(); if (slp_state >= ACPI_SS_GPE_SET) { acpi_wake_prep_walk(state); sc->acpi_sstate = ACPI_STATE_S0; } if (slp_state >= ACPI_SS_DEV_SUSPEND) DEVICE_RESUME(root_bus); if (slp_state >= ACPI_SS_SLP_PREP) AcpiLeaveSleepState(state); if (slp_state >= ACPI_SS_SLEPT) { #if defined(__i386__) || defined(__amd64__) /* NB: we are still using ACPI timecounter at this point. */ resume_TSC(); #endif acpi_resync_clock(sc); acpi_enable_fixed_events(sc); } sc->acpi_next_sstate = 0; bus_topo_unlock(); #ifdef EARLY_AP_STARTUP thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif resume_all_fs(); resume_all_proc(); EVENTHANDLER_INVOKE(power_resume); /* Allow another sleep request after a while. */ callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); /* Run /etc/rc.resume after we are back. */ if (devctl_process_running()) acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); return_ACPI_STATUS (status); } static void acpi_resync_clock(struct acpi_softc *sc) { /* * Warm up timecounter again and reset system clock. */ (void)timecounter->tc_get_timecount(timecounter); inittodr(time_second + sc->acpi_sleep_delay); } /* Enable or disable the device's wake GPE. */ int acpi_wake_set_enable(device_t dev, int enable) { struct acpi_prw_data prw; ACPI_STATUS status; int flags; /* Make sure the device supports waking the system and get the GPE. */ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) return (ENXIO); flags = acpi_get_flags(dev); if (enable) { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "enable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); } else { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "disable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); } return (0); } static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* Check that this is a wake-capable device and get its GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); /* * The destination sleep state must be less than (i.e., higher power) * or equal to the value specified by _PRW. If this GPE cannot be * enabled for the next sleep state, then disable it. If it can and * the user requested it be enabled, turn on any required power resources * and set _PSW. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (bootverbose) device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", acpi_name(handle), sstate); } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { acpi_pwr_wake_enable(handle, 1); acpi_SetInteger(handle, "_PSW", 1); if (bootverbose) device_printf(dev, "wake_prep enabled for %s (S%d)\n", acpi_name(handle), sstate); } return (0); } static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* * Check that this is a wake-capable device and get its GPE. Return * now if the user didn't enable this device for wake. 
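 *
 * (The per-device enable consulted here is the ACPI_FLAG_WAKE_ENABLED
 * flag toggled by acpi_wake_set_enable() above; userland flips it via
 * the "wake" sysctl attached below, e.g., for a hypothetical device:
 * "sysctl dev.example.0.wake=1".)
 *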
*/ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) return (0); /* * If this GPE couldn't be enabled for the previous sleep state, it was * disabled before going to sleep so re-enable it. If it was enabled, * clear _PSW and turn off any power resources it used. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (bootverbose) device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); } else { acpi_SetInteger(handle, "_PSW", 0); acpi_pwr_wake_enable(handle, 0); if (bootverbose) device_printf(dev, "run_prep cleaned up for %s\n", acpi_name(handle)); } return (0); } static ACPI_STATUS acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { int sstate; /* If suspending, run the sleep prep function, otherwise wake. */ sstate = *(int *)context; if (AcpiGbl_SystemAwakeAndRunning) acpi_wake_sleep_prep(handle, sstate); else acpi_wake_run_prep(handle, sstate); return (AE_OK); } /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ static int acpi_wake_prep_walk(int sstate) { ACPI_HANDLE sb_handle; if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_wake_prep, NULL, &sstate, NULL); return (0); } /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ static int acpi_wake_sysctl_walk(device_t dev) { int error, i, numdevs; device_t *devlist; device_t child; ACPI_STATUS status; error = device_get_children(dev, &devlist, &numdevs); if (error != 0 || numdevs == 0) { if (numdevs == 0) free(devlist, M_TEMP); return (error); } for (i = 0; i < numdevs; i++) { child = devlist[i]; acpi_wake_sysctl_walk(child); if (!device_is_attached(child)) continue; status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); if (ACPI_SUCCESS(status)) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, "wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0, acpi_wake_set_sysctl, "I", "Device set to wake the system"); } } free(devlist, M_TEMP); return (0); } /* Enable or disable wake from userland. */ static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) { int enable, error; device_t dev; dev = (device_t)arg1; enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; error = sysctl_handle_int(oidp, &enable, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enable != 0 && enable != 1) return (EINVAL); return (acpi_wake_set_enable(dev, enable)); } /* Parse a device's _PRW into a structure. */ int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) { ACPI_STATUS status; ACPI_BUFFER prw_buffer; ACPI_OBJECT *res, *res2; int error, i, power_count; if (h == NULL || prw == NULL) return (EINVAL); /* * The _PRW object (7.2.9) is only required for devices that have the * ability to wake the system from a sleeping state. */ error = EINVAL; prw_buffer.Pointer = NULL; prw_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); if (ACPI_FAILURE(status)) return (ENOENT); res = (ACPI_OBJECT *)prw_buffer.Pointer; if (res == NULL) return (ENOENT); if (!ACPI_PKG_VALID(res, 2)) goto out; /* * Element 1 of the _PRW object: * The lowest power system sleeping state that can be entered while still * providing wake functionality. 
The sleeping state being entered must * be less than (i.e., higher power) or equal to this value. */ if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) goto out; /* * Element 0 of the _PRW object: */ switch (res->Package.Elements[0].Type) { case ACPI_TYPE_INTEGER: /* * If the data type of this package element is numeric, then this * _PRW package element is the bit index in the GPEx_EN, in the * GPE blocks described in the FADT, of the enable bit that is * enabled for the wake event. */ prw->gpe_handle = NULL; prw->gpe_bit = res->Package.Elements[0].Integer.Value; error = 0; break; case ACPI_TYPE_PACKAGE: /* * If the data type of this package element is a package, then this * _PRW package element is itself a package containing two * elements. The first is an object reference to the GPE Block * device that contains the GPE that will be triggered by the wake * event. The second element is numeric and it contains the bit * index in the GPEx_EN, in the GPE Block referenced by the * first element in the package, of the enable bit that is enabled for * the wake event. * * For example, if this field is a package then it is of the form: * Package() {\_SB.PCI0.ISA.GPE, 2} */ res2 = &res->Package.Elements[0]; if (!ACPI_PKG_VALID(res2, 2)) goto out; prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); if (prw->gpe_handle == NULL) goto out; if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) goto out; error = 0; break; default: goto out; } /* Elements 2 to N of the _PRW object are power resources. */ power_count = res->Package.Count - 2; if (power_count > ACPI_PRW_MAX_POWERRES) { printf("ACPI device %s has too many power resources\n", acpi_name(h)); power_count = 0; } prw->power_res_count = power_count; for (i = 0; i < power_count; i++) prw->power_res[i] = res->Package.Elements[i]; out: if (prw_buffer.Pointer != NULL) AcpiOsFree(prw_buffer.Pointer); return (error); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { struct acpi_softc *sc = (struct acpi_softc *)arg; int ret; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Check if button action is disabled or unknown. */ if (state == ACPI_STATE_UNKNOWN) return; /* Request that the system prepare to enter the given suspend state. */ ret = acpi_ReqSleepState(sc, state); if (ret != 0) device_printf(sc->acpi_dev, "request to enter state S%d failed (err %d)\n", state, ret); return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Currently, nothing to do for wakeup. 
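 *
 * Other subsystems can observe these transitions by registering for
 * the acpi_sleep_event and acpi_wakeup_event handlers from an
 * initialization routine; an illustrative sketch with hypothetical
 * names:
 *
 *	static void
 *	example_wake_cb(void *arg, int sstate)
 *	{
 *		printf("resumed from S%d\n", sstate);
 *	}
 *
 *	EVENTHANDLER_REGISTER(acpi_wakeup_event, example_wake_cb,
 *	    NULL, EVENTHANDLER_PRI_ANY);
 *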
*/ return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ static void acpi_invoke_sleep_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context); } static void acpi_invoke_wake_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context); } UINT32 acpi_event_power_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_power_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } /* * XXX This static buffer is suboptimal. There is no locking so only * use this for single-threaded callers. */ char * acpi_name(ACPI_HANDLE handle) { ACPI_BUFFER buf; static char data[256]; buf.Length = sizeof(data); buf.Pointer = data; if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) return (data); return ("(unknown)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. */ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *env, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((env = kern_getenv("debug.acpi.avoid")) == NULL) return (0); /* Scan the avoid list checking for a match */ cp = env; for (;;) { while (*cp != 0 && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while (cp[len] != 0 && !isspace(cp[len])) len++; if (!strncmp(cp, np, len)) { freeenv(env); return(1); } cp += len; } freeenv(env); return (0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp, *env; int len; if ((env = kern_getenv("debug.acpi.disabled")) == NULL) return (0); if (strcmp(env, "all") == 0) { freeenv(env); return (1); } /* Scan the disable list, checking for a match. */ cp = env; for (;;) { while (*cp != '\0' && isspace(*cp)) cp++; if (*cp == '\0') break; len = 0; while (cp[len] != '\0' && !isspace(cp[len])) len++; if (strncmp(cp, subsys, len) == 0) { freeenv(env); return (1); } cp += len; } freeenv(env); return (0); } static void acpi_lookup(void *arg, const char *name, device_t *dev) { ACPI_HANDLE handle; if (*dev != NULL) return; /* * Allow any handle name that is specified as an absolute path and * starts with '\'. 
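 *
 * (Aside: the avoid/disable hooks above are driven by tunables taking
 * whitespace-separated lists; illustrative loader.conf settings:
 *
 *	debug.acpi.disabled="thermal"
 *	debug.acpi.avoid="_SB_.PCI0.EXPL"	(hypothetical node)
 *
 * Note that acpi_avoid() compares names with the leading backslash
 * stripped.)
 *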
We could restrict this to \_SB and friends, * but see acpi_probe_children() for notes on why we scan the entire * namespace for devices. * * XXX: The pathname argument to AcpiGetHandle() should be fixed to * be const. */ if (name[0] != '\\') return; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name), &handle))) return; *dev = acpi_get_device(handle); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the * register/deregister interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; acpi_ioctl_fn fn; void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return (ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); ACPI_UNLOCK(acpi); return (0); } void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) { struct acpi_ioctl_hook *hp; ACPI_LOCK(acpi); TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if (hp->cmd == cmd && hp->fn == fn) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } ACPI_UNLOCK(acpi); } static int acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, state; error = 0; hp = NULL; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted) TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) break; } ACPI_UNLOCK(acpi); if (hp) return (hp->fn(cmd, addr, hp->arg)); /* * Core ioctls are not permitted for non-writable user. * Currently, other ioctls just fetch information. * Not changing system behavior. */ if ((flag & FWRITE) == 0) return (EPERM); /* Core system ioctls. 
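 *
 * An illustrative userland caller, in the style of acpiconf(8):
 *
 *	int fd = open("/dev/acpi", O_WRONLY);
 *	int state = ACPI_STATE_S3;
 *	if (fd >= 0 && ioctl(fd, ACPIIO_REQSLPSTATE, &state) == -1)
 *		warn("suspend request failed");
 *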
*/ switch (cmd) { case ACPIIO_REQSLPSTATE: state = *(int *)addr; if (state != ACPI_STATE_S5) return (acpi_ReqSleepState(sc, state)); device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n"); error = EOPNOTSUPP; break; case ACPIIO_ACKSLPSTATE: error = *(int *)addr; error = acpi_AckSleepState(sc->acpi_clone, error); break; case ACPIIO_SETSLPSTATE: /* DEPRECATED */ state = *(int *)addr; if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); if (ACPI_FAILURE(acpi_SetSleepState(sc, state))) error = ENXIO; break; default: error = ENXIO; break; } return (error); } static int acpi_sname2sstate(const char *sname) { int sstate; if (toupper(sname[0]) == 'S') { sstate = sname[1] - '0'; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 && sname[2] == '\0') return (sstate); } else if (strcasecmp(sname, "NONE") == 0) return (ACPI_STATE_UNKNOWN); return (-1); } static const char * acpi_sstate2sname(int sstate) { static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" }; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5) return (snames[sstate]); else if (sstate == ACPI_STATE_UNKNOWN) return ("NONE"); return (NULL); } static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { int error; struct sbuf sb; UINT8 state; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (acpi_sleep_states[state]) sbuf_printf(&sb, "%s ", acpi_sstate2sname(state)); sbuf_trim(&sb); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error, new_state, old_state; old_state = *(int *)oidp->oid_arg1; strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state)); error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { new_state = acpi_sname2sstate(sleep_state); if (new_state < ACPI_STATE_S1) return (EINVAL); if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state]) return (EOPNOTSUPP); if (new_state != old_state) *(int *)oidp->oid_arg1 = new_state; } return (error); } /* Inform devctl(4) when we receive a Notify. */ void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) { char notify_buf[16]; ACPI_BUFFER handle_buf; ACPI_STATUS status; if (subsystem == NULL) return; handle_buf.Pointer = NULL; handle_buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiNsHandleToPathname(h, &handle_buf, FALSE); if (ACPI_FAILURE(status)) return; snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); AcpiOsFree(handle_buf.Pointer); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. 
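 *
 * For example, in loader.conf (illustrative values drawn from the
 * tables below):
 *
 *	debug.acpi.layer="ACPI_BUS ACPI_EC"
 *	debug.acpi.level="ACPI_LV_INFO !ACPI_LV_DEBUG_OBJECT"
 *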
*/ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUS", ACPI_BUS}, {"ACPI_BUTTON", ACPI_BUTTON}, {"ACPI_EC", ACPI_EC}, {"ACPI_FAN", ACPI_FAN}, {"ACPI_POWERRES", ACPI_POWERRES}, {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, {"ACPI_TIMER", ACPI_TIMER}, {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, {NULL, 0} }; static struct debugtag dbg_level[] = { {"ACPI_LV_INIT", ACPI_LV_INIT}, {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, {"ACPI_LV_INFO", ACPI_LV_INFO}, {"ACPI_LV_REPAIR", ACPI_LV_REPAIR}, {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, /* Trace verbosity level 1 [Standard Trace Level] */ {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, {"ACPI_LV_PARSE", ACPI_LV_PARSE}, {"ACPI_LV_LOAD", ACPI_LV_LOAD}, {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, {"ACPI_LV_EXEC", ACPI_LV_EXEC}, {"ACPI_LV_NAMES", ACPI_LV_NAMES}, {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, {"ACPI_LV_TABLES", ACPI_LV_TABLES}, {"ACPI_LV_VALUES", ACPI_LV_VALUES}, {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, /* Trace verbosity level 2 [Function tracing and memory allocation] */ {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, {"ACPI_LV_ALL", ACPI_LV_ALL}, /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, {"ACPI_LV_THREADS", ACPI_LV_THREADS}, {"ACPI_LV_IO", ACPI_LV_IO}, {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, /* Exceptionally verbose output -- also used in the global "DebugLevel" */ {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) *flag |= tag[i].value; else *flag &= ~tag[i].value; } } cp = ep; } } static void acpi_set_debugging(void *junk) { char *layer, *level; if (cold) { AcpiDbgLayer = 0; AcpiDbgLevel = 0; } layer = kern_getenv("debug.acpi.layer"); level = kern_getenv("debug.acpi.level"); if (layer == NULL && level == NULL) return; printf("ACPI set debug"); if (layer != NULL) { if (strcmp("NONE", layer) != 0) printf(" layer '%s'", layer); acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); freeenv(layer); } if (level != NULL) { if (strcmp("NONE", level) != 0) 
printf(" level '%s'", level); acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); freeenv(level); } printf("\n"); } SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, NULL); static int acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) { int error, *dbg; struct debugtag *tag; struct sbuf sb; char temp[128]; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) return (ENOMEM); if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { tag = &dbg_layer[0]; dbg = &AcpiDbgLayer; } else { tag = &dbg_level[0]; dbg = &AcpiDbgLevel; } /* Get old values if this is a get request. */ ACPI_SERIAL_BEGIN(acpi); if (*dbg == 0) { sbuf_cpy(&sb, "NONE"); } else if (req->newptr == NULL) { for (; tag->name != NULL; tag++) { if ((*dbg & tag->value) == tag->value) sbuf_printf(&sb, "%s ", tag->name); } } sbuf_trim(&sb); sbuf_finish(&sb); strlcpy(temp, sbuf_data(&sb), sizeof(temp)); sbuf_delete(&sb); error = sysctl_handle_string(oidp, temp, sizeof(temp), req); /* Check for error or no change */ if (error == 0 && req->newptr != NULL) { *dbg = 0; kern_setenv((char *)oidp->oid_arg1, temp); acpi_set_debugging(NULL); } ACPI_SERIAL_END(acpi); return (error); } SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); #endif /* ACPI_DEBUG */ static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS) { int error; int old; old = acpi_debug_objects; error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (old == acpi_debug_objects || (old && acpi_debug_objects)) return (0); ACPI_SERIAL_BEGIN(acpi); AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? 
TRUE : FALSE; ACPI_SERIAL_END(acpi); return (0); } static int acpi_parse_interfaces(char *str, struct acpi_interface *iface) { char *p; size_t len; int i, j; p = str; while (isspace(*p) || *p == ',') p++; len = strlen(p); if (len == 0) return (0); p = strdup(p, M_TEMP); for (i = 0; i < len; i++) if (p[i] == ',') p[i] = '\0'; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { i += strlen(p + i) + 1; j++; } if (j == 0) { free(p, M_TEMP); return (0); } iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK); iface->num = j; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { iface->data[j] = p + i; i += strlen(p + i) + 1; j++; } return (j); } static void acpi_free_interfaces(struct acpi_interface *iface) { free(iface->data[0], M_TEMP); free(iface->data, M_TEMP); } static void acpi_reset_interfaces(device_t dev) { struct acpi_interface list; ACPI_STATUS status; int i; if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiInstallInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to install _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "installed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiRemoveInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to remove _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "removed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } } static int acpi_pm_func(u_long cmd, void *arg, ...) { int state, acpi_state; int error; struct acpi_softc *sc; va_list ap; error = 0; switch (cmd) { case POWER_CMD_SUSPEND: sc = (struct acpi_softc *)arg; if (sc == NULL) { error = EINVAL; goto out; } va_start(ap, arg); state = va_arg(ap, int); va_end(ap); switch (state) { case POWER_SLEEP_STATE_STANDBY: acpi_state = sc->acpi_standby_sx; break; case POWER_SLEEP_STATE_SUSPEND: acpi_state = sc->acpi_suspend_sx; break; case POWER_SLEEP_STATE_HIBERNATE: acpi_state = ACPI_STATE_S4; break; default: error = EINVAL; goto out; } if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) error = ENXIO; break; default: error = EINVAL; goto out; } out: return (error); } static void acpi_pm_register(void *arg) { if (!cold || resource_disabled("acpi", 0)) return; power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); } SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL); diff --git a/sys/dev/acpica/acpi_pcib_acpi.c b/sys/dev/acpica/acpi_pcib_acpi.c index 235670076dae..fdf8e84d14e0 100644 --- a/sys/dev/acpica/acpi_pcib_acpi.c +++ b/sys/dev/acpica/acpi_pcib_acpi.c @@ -1,832 +1,829 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_acpi.h" #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include /* Hooks for the ACPI CA debugging infrastructure. */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI_ACPI") struct acpi_hpcib_softc { device_t ap_dev; ACPI_HANDLE ap_handle; bus_dma_tag_t ap_dma_tag; int ap_flags; uint32_t ap_osc_ctl; int ap_segment; /* PCI domain */ int ap_bus; /* bios-assigned bus number */ int ap_addr; /* device/func of PCI-Host bridge */ ACPI_BUFFER ap_prt; /* interrupt routing table */ #ifdef NEW_PCIB struct pcib_host_resources ap_host_res; #endif }; static int acpi_pcib_acpi_probe(device_t bus); static int acpi_pcib_acpi_attach(device_t bus); static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static uint32_t acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes); static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin); static int acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); static int acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); static int acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq); static struct resource *acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); #ifdef NEW_PCIB static int acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); #ifdef PCI_RES_BUS static int acpi_pcib_acpi_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); static int acpi_pcib_acpi_activate_resource(device_t dev, - device_t child, int type, int rid, - struct resource *r); + device_t child, struct resource *r); static int acpi_pcib_acpi_deactivate_resource(device_t dev, - device_t child, int type, int rid, - struct resource *r); + device_t child, struct resource *r); #endif #endif static int acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature); static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child); static device_method_t acpi_pcib_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pcib_acpi_probe), DEVMETHOD(device_attach, acpi_pcib_acpi_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), 
DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, acpi_pcib_acpi_alloc_resource), #ifdef NEW_PCIB DEVMETHOD(bus_adjust_resource, acpi_pcib_acpi_adjust_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), #endif #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_release_resource, acpi_pcib_acpi_release_resource), DEVMETHOD(bus_activate_resource, acpi_pcib_acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_pcib_acpi_deactivate_resource), #else DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), #endif DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, acpi_pcib_get_cpus), DEVMETHOD(bus_get_dma_tag, acpi_pcib_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, acpi_pcib_read_config), DEVMETHOD(pcib_write_config, acpi_pcib_write_config), DEVMETHOD(pcib_route_interrupt, acpi_pcib_acpi_route_interrupt), DEVMETHOD(pcib_alloc_msi, acpi_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, acpi_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, acpi_pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, acpi_pcib_power_for_sleep), DEVMETHOD(pcib_request_feature, acpi_pcib_request_feature), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, acpi_pcib_acpi_driver, acpi_pcib_acpi_methods, sizeof(struct acpi_hpcib_softc)); DRIVER_MODULE(acpi_pcib, acpi, acpi_pcib_acpi_driver, 0, 0); MODULE_DEPEND(acpi_pcib, acpi, 1, 1, 1); static int acpi_pcib_acpi_probe(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; int root; if (acpi_disabled("pcib") || (h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ENXIO); root = (devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0; AcpiOsFree(devinfo); if (!root || pci_cfgregopen() == 0) return (ENXIO); device_set_desc(dev, "ACPI Host-PCI bridge"); return (0); } #ifdef NEW_PCIB static ACPI_STATUS acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context) { struct acpi_hpcib_softc *sc; UINT64 length, min, max; u_int flags; int error, type; sc = context; switch (res->Type) { case ACPI_RESOURCE_TYPE_START_DEPENDENT: case ACPI_RESOURCE_TYPE_END_DEPENDENT: panic("host bridge has dependent resources"); case ACPI_RESOURCE_TYPE_ADDRESS16: case ACPI_RESOURCE_TYPE_ADDRESS32: case ACPI_RESOURCE_TYPE_ADDRESS64: case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: if (res->Data.Address.ProducerConsumer != ACPI_PRODUCER) break; switch (res->Type) { case ACPI_RESOURCE_TYPE_ADDRESS16: min = res->Data.Address16.Address.Minimum; max = res->Data.Address16.Address.Maximum; length = res->Data.Address16.Address.AddressLength; break; case ACPI_RESOURCE_TYPE_ADDRESS32: min = res->Data.Address32.Address.Minimum; max = res->Data.Address32.Address.Maximum; length = res->Data.Address32.Address.AddressLength; break; case ACPI_RESOURCE_TYPE_ADDRESS64: min = res->Data.Address64.Address.Minimum; max = res->Data.Address64.Address.Maximum; length = res->Data.Address64.Address.AddressLength; break; default: KASSERT(res->Type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64, ("should never happen")); min = res->Data.ExtAddress64.Address.Minimum; max =
res->Data.ExtAddress64.Address.Maximum; length = res->Data.ExtAddress64.Address.AddressLength; break; } if (length == 0) break; if (min + length - 1 != max && (res->Data.Address.MinAddressFixed != ACPI_ADDRESS_FIXED || res->Data.Address.MaxAddressFixed != ACPI_ADDRESS_FIXED)) break; flags = 0; switch (res->Data.Address.ResourceType) { case ACPI_MEMORY_RANGE: type = SYS_RES_MEMORY; if (res->Type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) { if (res->Data.Address.Info.Mem.Caching == ACPI_PREFETCHABLE_MEMORY) flags |= RF_PREFETCHABLE; } else { /* * XXX: Parse prefetch flag out of * TypeSpecific. */ } break; case ACPI_IO_RANGE: type = SYS_RES_IOPORT; break; #ifdef PCI_RES_BUS case ACPI_BUS_NUMBER_RANGE: type = PCI_RES_BUS; break; #endif default: return (AE_OK); } if (min + length - 1 != max) device_printf(sc->ap_dev, "Length mismatch for %d range: %jx vs %jx\n", type, (uintmax_t)(max - min + 1), (uintmax_t)length); #ifdef __i386__ if (min > ULONG_MAX) { device_printf(sc->ap_dev, "Ignoring %d range above 4GB (%#jx-%#jx)\n", type, (uintmax_t)min, (uintmax_t)max); break; } if (max > ULONG_MAX) { device_printf(sc->ap_dev, "Truncating end of %d range above 4GB (%#jx-%#jx)\n", type, (uintmax_t)min, (uintmax_t)max); max = ULONG_MAX; } #endif error = pcib_host_res_decodes(&sc->ap_host_res, type, min, max, flags); if (error) panic("Failed to manage %d range (%#jx-%#jx): %d", type, (uintmax_t)min, (uintmax_t)max, error); break; default: break; } return (AE_OK); } #endif #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static bool get_decoded_bus_range(struct acpi_hpcib_softc *sc, rman_res_t *startp, rman_res_t *endp) { struct resource_list_entry *rle; rle = resource_list_find(&sc->ap_host_res.hr_rl, PCI_RES_BUS, 0); if (rle == NULL) return (false); *startp = rle->start; *endp = rle->end; return (true); } #endif static int acpi_pcib_osc(struct acpi_hpcib_softc *sc, uint32_t osc_ctl) { ACPI_STATUS status; uint32_t cap_set[3]; static uint8_t pci_host_bridge_uuid[ACPI_UUID_LENGTH] = { 0x5b, 0x4d, 0xdb, 0x33, 0xf7, 0x1f, 0x1c, 0x40, 0x96, 0x57, 0x74, 0x41, 0xc0, 0x3d, 0xd7, 0x66 }; /* * Don't invoke _OSC if a control is already granted. * However, always invoke _OSC during attach when 0 is passed. */ if (osc_ctl != 0 && (sc->ap_osc_ctl & osc_ctl) == osc_ctl) return (0); /* Support Field: Extended PCI Config Space, PCI Segment Groups, MSI */ cap_set[PCI_OSC_SUPPORT] = PCIM_OSC_SUPPORT_EXT_PCI_CONF | PCIM_OSC_SUPPORT_SEG_GROUP | PCIM_OSC_SUPPORT_MSI; /* Active State Power Management, Clock Power Management Capability */ if (pci_enable_aspm) cap_set[PCI_OSC_SUPPORT] |= PCIM_OSC_SUPPORT_ASPM | PCIM_OSC_SUPPORT_CPMC; /* Control Field */ cap_set[PCI_OSC_CTL] = sc->ap_osc_ctl | osc_ctl; status = acpi_EvaluateOSC(sc->ap_handle, pci_host_bridge_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) { sc->ap_osc_ctl |= osc_ctl; return (0); } device_printf(sc->ap_dev, "_OSC failed: %s\n", AcpiFormatException(status)); return (EIO); } /* * _OSC may return an error in the status word, but will * update the control mask always. _OSC should not revoke * previously-granted controls. 
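 *
 * Individual controls are requested through acpi_pcib_request_feature()
 * below; e.g., enabling native PCI-e hotplug amounts to a successful
 *
 *	acpi_pcib_osc(sc, PCIM_OSC_CTL_PCIE_HP);
 *
 * call, which returns 0 only once the control has been granted.
 *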
*/ if ((cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) != sc->ap_osc_ctl) device_printf(sc->ap_dev, "_OSC revoked %#x\n", (cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) ^ sc->ap_osc_ctl); sc->ap_osc_ctl = cap_set[PCI_OSC_CTL]; if ((sc->ap_osc_ctl & osc_ctl) != osc_ctl) return (EIO); return (0); } static int acpi_pcib_acpi_attach(device_t dev) { struct acpi_hpcib_softc *sc; ACPI_STATUS status; static int bus0_seen = 0; u_int slot, func, busok; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct resource *bus_res; rman_res_t end, start; int rid; #endif int error, domain; uint8_t busno; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->ap_dev = dev; sc->ap_handle = acpi_get_handle(dev); /* * Don't attach if we're not really there. */ if (!acpi_DeviceIsPresent(dev)) return (ENXIO); acpi_pcib_osc(sc, 0); /* * Get our segment number by evaluating _SEG. * It's OK for this to not exist. */ status = acpi_GetInteger(sc->ap_handle, "_SEG", &sc->ap_segment); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _SEG - %s\n", AcpiFormatException(status)); return_VALUE (ENXIO); } /* If it's not found, assume 0. */ sc->ap_segment = 0; } /* * Get the address (device and function) of the associated * PCI-Host bridge device from _ADR. Assume we don't have one if * it doesn't exist. */ status = acpi_GetInteger(sc->ap_handle, "_ADR", &sc->ap_addr); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) device_printf(dev, "could not evaluate _ADR - %s\n", AcpiFormatException(status)); sc->ap_addr = -1; } #ifdef NEW_PCIB /* * Determine which address ranges this bridge decodes and setup * resource managers for those ranges. */ if (pcib_host_res_init(sc->ap_dev, &sc->ap_host_res) != 0) panic("failed to init hostb resources"); if (!acpi_disabled("hostres")) { status = AcpiWalkResources(sc->ap_handle, "_CRS", acpi_pcib_producer_handler, sc); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) device_printf(sc->ap_dev, "failed to parse resources: %s\n", AcpiFormatException(status)); } #endif /* * Get our base bus number by evaluating _BBN. * If this doesn't work, we assume we're bus number 0. * * XXX note that it may also not exist in the case where we are * meant to use a private configuration space mechanism for this bus, * so we should dig out our resources and check to see if we have * anything like that. How do we do this? * XXX If we have the requisite information, and if we don't think the * default PCI configuration space handlers can deal with this bus, * we should attach our own handler. * XXX invoke _REG on this for the PCI config space address space? * XXX It seems many BIOS's with multiple Host-PCI bridges do not set * _BBN correctly. They set _BBN to zero for all bridges. Thus, * if _BBN is zero and PCI bus 0 already exists, we try to read our * bus number from the configuration registers at address _ADR. * We only do this for domain/segment 0 in the hopes that this is * only needed for old single-domain machines. */ status = acpi_GetInteger(sc->ap_handle, "_BBN", &sc->ap_bus); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _BBN - %s\n", AcpiFormatException(status)); return (ENXIO); } else { /* If it's not found, assume 0. */ sc->ap_bus = 0; } } /* * If this is segment 0, the bus is zero, and PCI bus 0 already * exists, read the bus number via PCI config space. */ busok = 1; if (sc->ap_segment == 0 && sc->ap_bus == 0 && bus0_seen) { busok = 0; if (sc->ap_addr != -1) { /* XXX: We assume bus 0. 
*/ slot = ACPI_ADR_PCI_SLOT(sc->ap_addr); func = ACPI_ADR_PCI_FUNC(sc->ap_addr); if (bootverbose) device_printf(dev, "reading config registers from 0:%d:%d\n", slot, func); if (host_pcib_get_busno(pci_cfgregread, 0, slot, func, &busno) == 0) device_printf(dev, "couldn't read bus number from cfg space\n"); else { sc->ap_bus = busno; busok = 1; } } } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) /* * If nothing else worked, hope that ACPI at least lays out the * Host-PCI bridges in order and that as a result the next free * bus number is our bus number. */ if (busok == 0) { /* * If we have a region of bus numbers, use the first * number for our bus. */ if (get_decoded_bus_range(sc, &start, &end)) sc->ap_bus = start; else { rid = 0; bus_res = pci_domain_alloc_bus(sc->ap_segment, dev, &rid, 0, PCI_BUSMAX, 1, 0); if (bus_res == NULL) { device_printf(dev, "could not allocate bus number\n"); pcib_host_res_free(dev, &sc->ap_host_res); return (ENXIO); } sc->ap_bus = rman_get_start(bus_res); pci_domain_release_bus(sc->ap_segment, dev, rid, bus_res); } } else { /* * If there is a decoded bus range, assume the bus number is * the first value in the range. Warn if _BBN doesn't match. */ if (get_decoded_bus_range(sc, &start, &end)) { if (sc->ap_bus != start) { device_printf(dev, "WARNING: BIOS configured bus number (%d) is " "not within decoded bus number range " "(%ju - %ju).\n", sc->ap_bus, (uintmax_t)start, (uintmax_t)end); device_printf(dev, "Using range start (%ju) as bus number.\n", (uintmax_t)start); sc->ap_bus = start; } } } #else /* * If nothing else worked, hope that ACPI at least lays out the * host-PCI bridges in order and that as a result our unit number * is actually our bus number. There are several reasons this * might not be true. */ if (busok == 0) { sc->ap_bus = device_get_unit(dev); device_printf(dev, "trying bus number %d\n", sc->ap_bus); } #endif /* If this is bus 0 on segment 0, note that it has been seen already. */ if (sc->ap_segment == 0 && sc->ap_bus == 0) bus0_seen = 1; acpi_pcib_fetch_prt(dev, &sc->ap_prt); error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->ap_dma_tag); if (error != 0) goto errout; error = bus_get_domain(dev, &domain); if (error == 0) error = bus_dma_tag_set_domain(sc->ap_dma_tag, domain); /* Don't fail to attach if the domain can't be queried or set. */ error = 0; bus_generic_probe(dev); if (device_add_child(dev, "pci", -1) == NULL) { bus_dma_tag_destroy(sc->ap_dma_tag); sc->ap_dma_tag = NULL; error = ENXIO; goto errout; } return (bus_generic_attach(dev)); errout: device_printf(device_get_parent(dev), "couldn't attach pci bus\n"); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) pcib_host_res_free(dev, &sc->ap_host_res); #endif return (error); } /* * Support for standard PCI bridge ivars. 
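 */

/*
 * An illustrative consumer (hypothetical function name): the child
 * pci(4) bus reads these ivars through the standard accessors,
 * assuming the usual pcib_get_domain()/pcib_get_bus() wrappers around
 * BUS_READ_IVAR().
 */
static void
example_print_location(device_t pci)
{
	/* 'pci' is the bus device attached below this host bridge. */
	device_printf(pci, "PCI domain %u, bus %u\n",
	    pcib_get_domain(pci), pcib_get_bus(pci));
}

/*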
*/ static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->ap_segment; return (0); case PCIB_IVAR_BUS: *result = sc->ap_bus; return (0); case ACPI_IVAR_HANDLE: *result = (uintptr_t)sc->ap_handle; return (0); case ACPI_IVAR_FLAGS: *result = (uintptr_t)sc->ap_flags; return (0); } return (ENOENT); } static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: sc->ap_bus = value; return (0); case ACPI_IVAR_HANDLE: sc->ap_handle = (ACPI_HANDLE)value; return (0); case ACPI_IVAR_FLAGS: sc->ap_flags = (int)value; return (0); } return (ENOENT); } static uint32_t acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct acpi_hpcib_softc *sc = device_get_softc(dev); return (pci_cfgregread(sc->ap_segment, bus, slot, func, reg, bytes)); } static void acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct acpi_hpcib_softc *sc = device_get_softc(dev); pci_cfgregwrite(sc->ap_segment, bus, slot, func, reg, data, bytes); } static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin) { struct acpi_hpcib_softc *sc = device_get_softc(pcib); return (acpi_pcib_route_interrupt(pcib, dev, pin, &sc->ap_prt)); } static int acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } static int acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } static int acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { struct acpi_hpcib_softc *sc; device_t bus, hostb; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); sc = device_get_softc(pcib); if (sc->ap_addr == -1) return (0); /* XXX: Assumes all bridges are on bus 0. */ hostb = pci_find_dbsf(sc->ap_segment, 0, ACPI_ADR_PCI_SLOT(sc->ap_addr), ACPI_ADR_PCI_FUNC(sc->ap_addr)); if (hostb != NULL) pci_ht_map_msi(hostb, *addr); return (0); } struct resource * acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef NEW_PCIB struct acpi_hpcib_softc *sc; struct resource *res; #endif #if defined(__i386__) || defined(__amd64__) start = hostb_alloc_start(type, start, end, count); #endif #ifdef NEW_PCIB sc = device_get_softc(dev); #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end, count, flags)); #endif res = pcib_host_res_alloc(&sc->ap_host_res, child, type, rid, start, end, count, flags); /* * XXX: If this is a request for a specific range, assume it is * correct and pass it up to the parent. What we probably want to * do long-term is explicitly trust any firmware-configured * resources during the initial bus scan on boot and then disable * this after that. 
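 *
 * A "specific" request here is one whose window is fully pinned by the
 * caller, i.e. start + count - 1 == end, as in this illustrative
 * allocation:
 *
 *	bus_alloc_resource(child, SYS_RES_MEMORY, &rid,
 *	    0xf0000000, 0xf0003fff, 0x4000, RF_ACTIVE);
 *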
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); return (res); #else return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); #endif } #ifdef NEW_PCIB int acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); #ifdef PCI_RES_BUS if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_adjust_bus(sc->ap_segment, child, r, start, end)); #endif return (pcib_host_res_adjust(&sc->ap_host_res, child, r, start, end)); } #ifdef PCI_RES_BUS int acpi_pcib_acpi_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_release_bus(sc->ap_segment, child, rid, r)); return (bus_generic_release_resource(dev, child, type, rid, r)); } int -acpi_pcib_acpi_activate_resource(device_t dev, device_t child, int type, int rid, +acpi_pcib_acpi_activate_resource(device_t dev, device_t child, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); - if (type == PCI_RES_BUS) - return (pci_domain_activate_bus(sc->ap_segment, child, rid, r)); - return (bus_generic_activate_resource(dev, child, type, rid, r)); + if (rman_get_type(r) == PCI_RES_BUS) + return (pci_domain_activate_bus(sc->ap_segment, child, r)); + return (bus_generic_activate_resource(dev, child, r)); } int -acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child, + struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); - if (type == PCI_RES_BUS) - return (pci_domain_deactivate_bus(sc->ap_segment, child, rid, - r)); - return (bus_generic_deactivate_resource(dev, child, type, rid, r)); + if (rman_get_type(r) == PCI_RES_BUS) + return (pci_domain_deactivate_bus(sc->ap_segment, child, r)); + return (bus_generic_deactivate_resource(dev, child, r)); } #endif #endif static int acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature) { uint32_t osc_ctl; struct acpi_hpcib_softc *sc; sc = device_get_softc(pcib); switch (feature) { case PCI_FEATURE_HP: osc_ctl = PCIM_OSC_CTL_PCIE_HP; break; case PCI_FEATURE_AER: osc_ctl = PCIM_OSC_CTL_PCIE_AER; break; default: return (EINVAL); } return (acpi_pcib_osc(sc, osc_ctl)); } static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child) { struct acpi_hpcib_softc *sc; sc = device_get_softc(bus); return (sc->ap_dma_tag); } diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c index 6ed04297f6c8..a83189ca7bc8 100644 --- a/sys/dev/agp/agp_i810.c +++ b/sys/dev/agp/agp_i810.c @@ -1,2372 +1,2372 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Doug Rabson * Copyright (c) 2000 Ruslan Ermilov * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Fixes for 830/845G support: David Dawes * 852GM/855GM/865G support added by David Dawes * * This is generic Intel GTT handling code, morphed from the AGP * bridge code. */ #include #if 0 #define KTR_AGP_I810 KTR_DEV #else #define KTR_AGP_I810 0 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DECLARE(M_AGP); struct agp_i810_match; static int agp_i810_check_active(device_t bridge_dev); static int agp_i830_check_active(device_t bridge_dev); static int agp_i915_check_active(device_t bridge_dev); static void agp_82852_set_desc(device_t dev, const struct agp_i810_match *match); static void agp_i810_set_desc(device_t dev, const struct agp_i810_match *match); static void agp_i810_dump_regs(device_t dev); static void agp_i830_dump_regs(device_t dev); static void agp_i855_dump_regs(device_t dev); static void agp_i915_dump_regs(device_t dev); static void agp_i965_dump_regs(device_t dev); static int agp_i810_get_stolen_size(device_t dev); static int agp_i830_get_stolen_size(device_t dev); static int agp_i915_get_stolen_size(device_t dev); static int agp_i810_get_gtt_mappable_entries(device_t dev); static int agp_i830_get_gtt_mappable_entries(device_t dev); static int agp_i915_get_gtt_mappable_entries(device_t dev); static int agp_i810_get_gtt_total_entries(device_t dev); static int agp_i965_get_gtt_total_entries(device_t dev); static int agp_gen5_get_gtt_total_entries(device_t dev); static int agp_i810_install_gatt(device_t dev); static int agp_i830_install_gatt(device_t dev); static int agp_i965_install_gatt(device_t dev); static int agp_g4x_install_gatt(device_t dev); static void agp_i810_deinstall_gatt(device_t dev); static void agp_i830_deinstall_gatt(device_t dev); static void agp_i810_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i830_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i915_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i965_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_g4x_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i810_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_i915_write_gtt(device_t dev, u_int index, uint32_t pte); static void 
agp_i965_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_g4x_write_gtt(device_t dev, u_int index, uint32_t pte); static u_int32_t agp_i810_read_gtt_pte(device_t dev, u_int index); static u_int32_t agp_i915_read_gtt_pte(device_t dev, u_int index); static u_int32_t agp_i965_read_gtt_pte(device_t dev, u_int index); static u_int32_t agp_g4x_read_gtt_pte(device_t dev, u_int index); static vm_paddr_t agp_i810_read_gtt_pte_paddr(device_t dev, u_int index); static vm_paddr_t agp_i915_read_gtt_pte_paddr(device_t dev, u_int index); static int agp_i810_set_aperture(device_t dev, u_int32_t aperture); static int agp_i830_set_aperture(device_t dev, u_int32_t aperture); static int agp_i915_set_aperture(device_t dev, u_int32_t aperture); static int agp_i810_chipset_flush_setup(device_t dev); static int agp_i915_chipset_flush_setup(device_t dev); static int agp_i965_chipset_flush_setup(device_t dev); static void agp_i810_chipset_flush_teardown(device_t dev); static void agp_i915_chipset_flush_teardown(device_t dev); static void agp_i965_chipset_flush_teardown(device_t dev); static void agp_i810_chipset_flush(device_t dev); static void agp_i830_chipset_flush(device_t dev); static void agp_i915_chipset_flush(device_t dev); enum { CHIP_I810, /* i810/i815 */ CHIP_I830, /* 830M/845G */ CHIP_I855, /* 852GM/855GM/865G */ CHIP_I915, /* 915G/915GM */ CHIP_I965, /* G965 */ CHIP_G33, /* G33/Q33/Q35 */ CHIP_IGD, /* Pineview */ CHIP_G4X, /* G45/Q45 */ }; /* The i810 through i855 have the registers at BAR 1, and the GATT gets * allocated by us. The i915 has registers in BAR 0 and the GATT is at the * start of the stolen memory, and should only be accessed by the OS through * BAR 3. The G965 has registers and GATT in the same BAR (0) -- first 512KB * is registers, second 512KB is GATT. 
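 *
 * The resource_spec tables below encode these BAR assignments.  As a
 * sketch of their use (this is the pattern agp_i810_attach() follows
 * later in this file), a table is handed to bus_alloc_resources(),
 * which fills a parallel array of struct resource pointers and stops
 * at the { -1, 0 } terminator:
 *
 *	struct resource *res[2];
 *
 *	if (bus_alloc_resources(dev, agp_i915_res_spec, res) != 0)
 *		return (ENXIO);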
*/ static struct resource_spec agp_i810_res_spec[] = { { SYS_RES_MEMORY, AGP_I810_MMADR, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec agp_i915_res_spec[] = { { SYS_RES_MEMORY, AGP_I915_MMADR, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_MEMORY, AGP_I915_GTTADR, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec agp_i965_res_spec[] = { { SYS_RES_MEMORY, AGP_I965_GTTMMADR, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_MEMORY, AGP_I965_APBASE, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct agp_i810_softc { struct agp_softc agp; u_int32_t initial_aperture; /* aperture size at startup */ struct agp_gatt *gatt; u_int32_t dcache_size; /* i810 only */ u_int32_t stolen; /* number of i830/845 gtt entries for stolen memory */ u_int stolen_size; /* BIOS-reserved graphics memory */ u_int gtt_total_entries; /* Total number of gtt ptes */ u_int gtt_mappable_entries; /* Number of gtt ptes mappable by CPU */ device_t bdev; /* bridge device */ void *argb_cursor; /* contigmalloc area for ARGB cursor */ struct resource *sc_res[2]; const struct agp_i810_match *match; int sc_flush_page_rid; struct resource *sc_flush_page_res; void *sc_flush_page_vaddr; int sc_bios_allocated_flush_page; }; static device_t intel_agp; struct agp_i810_driver { int chiptype; int gen; int busdma_addr_mask_sz; struct resource_spec *res_spec; int (*check_active)(device_t); void (*set_desc)(device_t, const struct agp_i810_match *); void (*dump_regs)(device_t); int (*get_stolen_size)(device_t); int (*get_gtt_total_entries)(device_t); int (*get_gtt_mappable_entries)(device_t); int (*install_gatt)(device_t); void (*deinstall_gatt)(device_t); void (*write_gtt)(device_t, u_int, uint32_t); void (*install_gtt_pte)(device_t, u_int, vm_offset_t, int); u_int32_t (*read_gtt_pte)(device_t, u_int); vm_paddr_t (*read_gtt_pte_paddr)(device_t , u_int); int (*set_aperture)(device_t, u_int32_t); int (*chipset_flush_setup)(device_t); void (*chipset_flush_teardown)(device_t); void (*chipset_flush)(device_t); }; static struct { struct intel_gtt base; } intel_private; static const struct agp_i810_driver agp_i810_i810_driver = { .chiptype = CHIP_I810, .gen = 1, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i810_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i810_dump_regs, .get_stolen_size = agp_i810_get_stolen_size, .get_gtt_mappable_entries = agp_i810_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i810_install_gatt, .deinstall_gatt = agp_i810_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i810_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i810_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i810_chipset_flush, }; static const struct agp_i810_driver agp_i810_i815_driver = { .chiptype = CHIP_I810, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i810_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i810_dump_regs, .get_stolen_size = agp_i810_get_stolen_size, .get_gtt_mappable_entries = agp_i830_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i810_install_gatt, .deinstall_gatt = agp_i810_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i810_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, 
.read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i810_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i830_driver = { .chiptype = CHIP_I830, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i830_dump_regs, .get_stolen_size = agp_i830_get_stolen_size, .get_gtt_mappable_entries = agp_i830_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i830_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i855_driver = { .chiptype = CHIP_I855, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_82852_set_desc, .dump_regs = agp_i855_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i830_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i865_driver = { .chiptype = CHIP_I855, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i855_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i915_driver = { .chiptype = CHIP_I915, .gen = 3, .busdma_addr_mask_sz = 32, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i915_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, 
.chipset_flush_setup = agp_i915_chipset_flush_setup, .chipset_flush_teardown = agp_i915_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_g33_driver = { .chiptype = CHIP_G33, .gen = 3, .busdma_addr_mask_sz = 36, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_igd_driver = { .chiptype = CHIP_IGD, .gen = 3, .busdma_addr_mask_sz = 36, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i915_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_g965_driver = { .chiptype = CHIP_I965, .gen = 4, .busdma_addr_mask_sz = 36, .res_spec = agp_i965_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i965_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i965_write_gtt, .install_gtt_pte = agp_i965_install_gtt_pte, .read_gtt_pte = agp_i965_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_g4x_driver = { .chiptype = CHIP_G4X, .gen = 5, .busdma_addr_mask_sz = 36, .res_spec = agp_i965_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_gen5_get_gtt_total_entries, .install_gatt = agp_g4x_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_g4x_write_gtt, .install_gtt_pte = agp_g4x_install_gtt_pte, .read_gtt_pte = agp_g4x_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = 
agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; /* For adding new devices, devid is the id of the graphics controller * (pci:0:2:0, for example). The placeholder (usually at pci:0:2:1) for the * second head should never be added. The bridge_offset is the offset to * subtract from devid to get the id of the hostb that the device is on. */ static const struct agp_i810_match { int devid; char *name; const struct agp_i810_driver *driver; } agp_i810_matches[] = { { .devid = 0x71218086, .name = "Intel 82810 (i810 GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x71238086, .name = "Intel 82810-DC100 (i810-DC100 GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x71258086, .name = "Intel 82810E (i810E GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x11328086, .name = "Intel 82815 (i815 GMCH) SVGA controller", .driver = &agp_i810_i815_driver }, { .devid = 0x35778086, .name = "Intel 82830M (830M GMCH) SVGA controller", .driver = &agp_i810_i830_driver }, { .devid = 0x25628086, .name = "Intel 82845M (845M GMCH) SVGA controller", .driver = &agp_i810_i830_driver }, { .devid = 0x35828086, .name = "Intel 82852/855GM SVGA controller", .driver = &agp_i810_i855_driver }, { .devid = 0x25728086, .name = "Intel 82865G (865G GMCH) SVGA controller", .driver = &agp_i810_i865_driver }, { .devid = 0x25828086, .name = "Intel 82915G (915G GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x258A8086, .name = "Intel E7221 SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x25928086, .name = "Intel 82915GM (915GM GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27728086, .name = "Intel 82945G (945G GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27A28086, .name = "Intel 82945GM (945GM GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27AE8086, .name = "Intel 945GME SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x29728086, .name = "Intel 946GZ SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29828086, .name = "Intel G965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29928086, .name = "Intel Q965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29A28086, .name = "Intel G965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29B28086, .name = "Intel Q35 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0x29C28086, .name = "Intel G33 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0x29D28086, .name = "Intel Q33 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0xA0018086, .name = "Intel Pineview SVGA controller", .driver = &agp_i810_igd_driver }, { .devid = 0xA0118086, .name = "Intel Pineview (M) SVGA controller", .driver = &agp_i810_igd_driver }, { .devid = 0x2A028086, .name = "Intel GM965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x2A128086, .name = "Intel GME965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x2A428086, .name = "Intel GM45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E028086, .name = "Intel Eaglelake SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E128086, .name = "Intel Q45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E228086, .name = "Intel G45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E328086, .name = "Intel G41 SVGA controller", .driver = &agp_i810_g4x_driver }, 
{ .devid = 0x00428086, .name = "Intel Ironlake (D) SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x00468086, .name = "Intel Ironlake (M) SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0, } }; static const struct agp_i810_match* agp_i810_match(device_t dev) { int i, devid; if (pci_get_class(dev) != PCIC_DISPLAY || (pci_get_subclass(dev) != PCIS_DISPLAY_VGA && pci_get_subclass(dev) != PCIS_DISPLAY_OTHER)) return (NULL); devid = pci_get_devid(dev); for (i = 0; agp_i810_matches[i].devid != 0; i++) { if (agp_i810_matches[i].devid == devid) break; } if (agp_i810_matches[i].devid == 0) return (NULL); else return (&agp_i810_matches[i]); } /* * Find bridge device. */ static device_t agp_i810_find_bridge(device_t dev) { return (pci_find_dbsf(0, 0, 0, 0)); } static void agp_i810_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "agp", -1) == NULL && agp_i810_match(parent)) device_add_child(parent, "agp", -1); } static int agp_i810_check_active(device_t bridge_dev) { u_int8_t smram; smram = pci_read_config(bridge_dev, AGP_I810_SMRAM, 1); if ((smram & AGP_I810_SMRAM_GMS) == AGP_I810_SMRAM_GMS_DISABLED) return (ENXIO); return (0); } static int agp_i830_check_active(device_t bridge_dev) { int gcc1; gcc1 = pci_read_config(bridge_dev, AGP_I830_GCC1, 1); if ((gcc1 & AGP_I830_GCC1_DEV2) == AGP_I830_GCC1_DEV2_DISABLED) return (ENXIO); return (0); } static int agp_i915_check_active(device_t bridge_dev) { int deven; deven = pci_read_config(bridge_dev, AGP_I915_DEVEN, 4); if ((deven & AGP_I915_DEVEN_D2F0) == AGP_I915_DEVEN_D2F0_DISABLED) return (ENXIO); return (0); } static void agp_82852_set_desc(device_t dev, const struct agp_i810_match *match) { switch (pci_read_config(dev, AGP_I85X_CAPID, 1)) { case AGP_I855_GME: device_set_desc(dev, "Intel 82855GME (855GME GMCH) SVGA controller"); break; case AGP_I855_GM: device_set_desc(dev, "Intel 82855GM (855GM GMCH) SVGA controller"); break; case AGP_I852_GME: device_set_desc(dev, "Intel 82852GME (852GME GMCH) SVGA controller"); break; case AGP_I852_GM: device_set_desc(dev, "Intel 82852GM (852GM GMCH) SVGA controller"); break; default: device_set_desc(dev, "Intel 8285xM (85xGM GMCH) SVGA controller"); break; } } static void agp_i810_set_desc(device_t dev, const struct agp_i810_match *match) { device_set_desc(dev, match->name); } static int agp_i810_probe(device_t dev) { device_t bdev; const struct agp_i810_match *match; int err; if (resource_disabled("agp", device_get_unit(dev))) return (ENXIO); match = agp_i810_match(dev); if (match == NULL) return (ENXIO); bdev = agp_i810_find_bridge(dev); if (bdev == NULL) { if (bootverbose) printf("I810: can't find bridge device\n"); return (ENXIO); } /* * checking whether internal graphics device has been activated. 
*/ err = match->driver->check_active(bdev); if (err != 0) { if (bootverbose) printf("i810: disabled, not probing\n"); return (err); } match->driver->set_desc(dev, match); return (BUS_PROBE_DEFAULT); } static void agp_i810_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I810_MISCC: 0x%04x\n", pci_read_config(sc->bdev, AGP_I810_MISCC, 2)); } static void agp_i830_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I830_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I830_GCC1, 1)); } static void agp_i855_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); } static void agp_i915_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); device_printf(dev, "AGP_I915_MSAC: 0x%02x\n", pci_read_config(sc->bdev, AGP_I915_MSAC, 1)); } static void agp_i965_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I965_PGTBL_CTL2: %08x\n", bus_read_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); device_printf(dev, "AGP_I965_MSAC: 0x%02x\n", pci_read_config(sc->bdev, AGP_I965_MSAC, 1)); } static int agp_i810_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->stolen = 0; sc->stolen_size = 0; return (0); } static int agp_i830_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; unsigned int gcc1; sc = device_get_softc(dev); gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 1); switch (gcc1 & AGP_I830_GCC1_GMS) { case AGP_I830_GCC1_GMS_STOLEN_512: sc->stolen = (512 - 132) * 1024 / 4096; sc->stolen_size = 512 * 1024; break; case AGP_I830_GCC1_GMS_STOLEN_1024: sc->stolen = (1024 - 132) * 1024 / 4096; sc->stolen_size = 1024 * 1024; break; case AGP_I830_GCC1_GMS_STOLEN_8192: sc->stolen = (8192 - 132) * 1024 / 4096; sc->stolen_size = 8192 * 1024; break; default: sc->stolen = 0; device_printf(dev, "unknown memory configuration, disabling (GCC1 %x)\n", gcc1); return (EINVAL); } return (0); } static int agp_i915_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; unsigned int gcc1, stolen, gtt_size; sc = device_get_softc(dev); /* * Stolen memory is set up at the beginning of the aperture by * the BIOS, consisting of the GATT followed by 4kb for the * BIOS display. 
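 *
 * Worked example: on an i915 with 8MB of stolen memory, gtt_size is
 * 256KB plus the 4KB BIOS display page, so stolen_size is the full
 * 8MB while the count of reusable GTT entries works out to
 * (8192 - 260) * 1024 / 4096 = 1983 pages.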
*/ switch (sc->match->driver->chiptype) { case CHIP_I855: gtt_size = 128; break; case CHIP_I915: gtt_size = 256; break; case CHIP_I965: switch (bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL) & AGP_I810_PGTBL_SIZE_MASK) { case AGP_I810_PGTBL_SIZE_128KB: gtt_size = 128; break; case AGP_I810_PGTBL_SIZE_256KB: gtt_size = 256; break; case AGP_I810_PGTBL_SIZE_512KB: gtt_size = 512; break; case AGP_I965_PGTBL_SIZE_1MB: gtt_size = 1024; break; case AGP_I965_PGTBL_SIZE_2MB: gtt_size = 2048; break; case AGP_I965_PGTBL_SIZE_1_5MB: gtt_size = 1024 + 512; break; default: device_printf(dev, "Bad PGTBL size\n"); return (EINVAL); } break; case CHIP_G33: gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 2); switch (gcc1 & AGP_G33_MGGC_GGMS_MASK) { case AGP_G33_MGGC_GGMS_SIZE_1M: gtt_size = 1024; break; case AGP_G33_MGGC_GGMS_SIZE_2M: gtt_size = 2048; break; default: device_printf(dev, "Bad PGTBL size\n"); return (EINVAL); } break; case CHIP_IGD: case CHIP_G4X: gtt_size = 0; break; default: device_printf(dev, "Bad chiptype\n"); return (EINVAL); } /* GCC1 is called MGGC on i915+ */ gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 1); switch (gcc1 & AGP_I855_GCC1_GMS) { case AGP_I855_GCC1_GMS_STOLEN_1M: stolen = 1024; break; case AGP_I855_GCC1_GMS_STOLEN_4M: stolen = 4 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_8M: stolen = 8 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_16M: stolen = 16 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_32M: stolen = 32 * 1024; break; case AGP_I915_GCC1_GMS_STOLEN_48M: stolen = sc->match->driver->gen > 2 ? 48 * 1024 : 0; break; case AGP_I915_GCC1_GMS_STOLEN_64M: stolen = sc->match->driver->gen > 2 ? 64 * 1024 : 0; break; case AGP_G33_GCC1_GMS_STOLEN_128M: stolen = sc->match->driver->gen > 2 ? 128 * 1024 : 0; break; case AGP_G33_GCC1_GMS_STOLEN_256M: stolen = sc->match->driver->gen > 2 ? 
256 * 1024 : 0; break; case AGP_G4X_GCC1_GMS_STOLEN_96M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 96 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_160M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 160 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_224M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 224 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_352M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 352 * 1024; else stolen = 0; break; default: device_printf(dev, "unknown memory configuration, disabling (GCC1 %x)\n", gcc1); return (EINVAL); } gtt_size += 4; sc->stolen_size = stolen * 1024; sc->stolen = (stolen - gtt_size) * 1024 / 4096; return (0); } static int agp_i810_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; uint16_t miscc; sc = device_get_softc(dev); miscc = pci_read_config(sc->bdev, AGP_I810_MISCC, 2); if ((miscc & AGP_I810_MISCC_WINSIZE) == AGP_I810_MISCC_WINSIZE_32) ap = 32; else ap = 64; sc->gtt_mappable_entries = (ap * 1024 * 1024) >> AGP_PAGE_SHIFT; return (0); } static int agp_i830_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; uint16_t gmch_ctl; sc = device_get_softc(dev); gmch_ctl = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); if ((gmch_ctl & AGP_I830_GCC1_GMASIZE) == AGP_I830_GCC1_GMASIZE_64) ap = 64; else ap = 128; sc->gtt_mappable_entries = (ap * 1024 * 1024) >> AGP_PAGE_SHIFT; return (0); } static int agp_i915_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; sc = device_get_softc(dev); ap = AGP_GET_APERTURE(dev); sc->gtt_mappable_entries = ap >> AGP_PAGE_SHIFT; return (0); } static int agp_i810_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->gtt_total_entries = sc->gtt_mappable_entries; return (0); } static int agp_i965_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t pgetbl_ctl; int error; sc = device_get_softc(dev); error = 0; pgetbl_ctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); switch (pgetbl_ctl & AGP_I810_PGTBL_SIZE_MASK) { case AGP_I810_PGTBL_SIZE_128KB: sc->gtt_total_entries = 128 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_256KB: sc->gtt_total_entries = 256 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_512KB: sc->gtt_total_entries = 512 * 1024 / 4; break; /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ case AGP_I810_PGTBL_SIZE_1MB: sc->gtt_total_entries = 1024 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_2MB: sc->gtt_total_entries = 2 * 1024 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_1_5MB: sc->gtt_total_entries = (1024 + 512) * 1024 / 4; break; default: device_printf(dev, "Unknown page table size\n"); error = ENXIO; } return (error); } static void agp_gen5_adjust_pgtbl_size(device_t dev, uint32_t sz) { struct agp_i810_softc *sc; uint32_t pgetbl_ctl, pgetbl_ctl2; sc = device_get_softc(dev); /* Disable per-process page table. */ pgetbl_ctl2 = bus_read_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2); pgetbl_ctl2 &= ~AGP_I810_PGTBL_ENABLED; bus_write_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2, pgetbl_ctl2); /* Write the new ggtt size. 
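 *
 * This is a read-modify-write of AGP_I810_PGTBL_CTL: only the bits
 * covered by AGP_I810_PGTBL_SIZE_MASK are replaced, preserving the
 * enable bit and the page table address.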
*/ pgetbl_ctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgetbl_ctl &= ~AGP_I810_PGTBL_SIZE_MASK; pgetbl_ctl |= sz; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgetbl_ctl); } static int agp_gen5_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; uint16_t gcc1; sc = device_get_softc(dev); gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); switch (gcc1 & AGP_G4x_GCC1_SIZE_MASK) { case AGP_G4x_GCC1_SIZE_1M: case AGP_G4x_GCC1_SIZE_VT_1M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_1MB); break; case AGP_G4x_GCC1_SIZE_VT_1_5M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_1_5MB); break; case AGP_G4x_GCC1_SIZE_2M: case AGP_G4x_GCC1_SIZE_VT_2M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_2MB); break; default: device_printf(dev, "Unknown page table size\n"); return (ENXIO); } return (agp_i965_get_gtt_total_entries(dev)); } static int agp_i810_install_gatt(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); /* Some i810s have on-chip memory called dcache. */ if ((bus_read_1(sc->sc_res[0], AGP_I810_DRT) & AGP_I810_DRT_POPULATED) != 0) sc->dcache_size = 4 * 1024 * 1024; else sc->dcache_size = 0; /* According to the specs the gatt on the i810 must be 64k. */ sc->gatt->ag_virtual = kmem_alloc_contig(64 * 1024, M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING); if (sc->gatt->ag_virtual == NULL) { if (bootverbose) device_printf(dev, "contiguous allocation failed\n"); return (ENOMEM); } sc->gatt->ag_physical = vtophys((vm_offset_t)sc->gatt->ag_virtual); /* Install the GATT. */ bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, sc->gatt->ag_physical | 1); return (0); } static void agp_i830_install_gatt_init(struct agp_i810_softc *sc) { uint32_t pgtblctl; /* * The i830 automatically initializes the 128k gatt on boot. * GATT address is already in there, make sure it's enabled. */ pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgtblctl |= 1; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl); sc->gatt->ag_physical = pgtblctl & ~1; } static int agp_i830_install_gatt(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); agp_i830_install_gatt_init(sc); return (0); } static int agp_gen4_install_gatt(device_t dev, const vm_size_t gtt_offset) { struct agp_i810_softc *sc; sc = device_get_softc(dev); pmap_change_attr((vm_offset_t)rman_get_virtual(sc->sc_res[0]) + gtt_offset, rman_get_size(sc->sc_res[0]) - gtt_offset, VM_MEMATTR_WRITE_COMBINING); agp_i830_install_gatt_init(sc); return (0); } static int agp_i965_install_gatt(device_t dev) { return (agp_gen4_install_gatt(dev, 512 * 1024)); } static int agp_g4x_install_gatt(device_t dev) { return (agp_gen4_install_gatt(dev, 2 * 1024 * 1024)); } static int agp_i810_attach(device_t dev) { struct agp_i810_softc *sc; int error; sc = device_get_softc(dev); sc->bdev = agp_i810_find_bridge(dev); if (sc->bdev == NULL) return (ENOENT); sc->match = agp_i810_match(dev); agp_set_aperture_resource(dev, sc->match->driver->gen <= 2 ? 
AGP_APBASE : AGP_I915_GMADR); error = agp_generic_attach(dev); if (error) return (error); if (ptoa((vm_paddr_t)Maxmem) > (1ULL << sc->match->driver->busdma_addr_mask_sz) - 1) { device_printf(dev, "agp_i810 does not support physical " "memory above %ju.\n", (uintmax_t)(1ULL << sc->match->driver->busdma_addr_mask_sz) - 1); return (ENOENT); } if (bus_alloc_resources(dev, sc->match->driver->res_spec, sc->sc_res)) { agp_generic_detach(dev); return (ENODEV); } sc->initial_aperture = AGP_GET_APERTURE(dev); sc->gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_WAITOK); sc->gatt->ag_entries = AGP_GET_APERTURE(dev) >> AGP_PAGE_SHIFT; if ((error = sc->match->driver->get_stolen_size(dev)) != 0 || (error = sc->match->driver->install_gatt(dev)) != 0 || (error = sc->match->driver->get_gtt_mappable_entries(dev)) != 0 || (error = sc->match->driver->get_gtt_total_entries(dev)) != 0 || (error = sc->match->driver->chipset_flush_setup(dev)) != 0) { bus_release_resources(dev, sc->match->driver->res_spec, sc->sc_res); free(sc->gatt, M_AGP); agp_generic_detach(dev); return (error); } intel_agp = dev; device_printf(dev, "aperture size is %dM", sc->initial_aperture / 1024 / 1024); if (sc->stolen > 0) printf(", detected %dk stolen memory\n", sc->stolen * 4); else printf("\n"); if (bootverbose) { sc->match->driver->dump_regs(dev); device_printf(dev, "Mappable GTT entries: %d\n", sc->gtt_mappable_entries); device_printf(dev, "Total GTT entries: %d\n", sc->gtt_total_entries); } return (0); } static void agp_i810_deinstall_gatt(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, 0); kmem_free(sc->gatt->ag_virtual, 64 * 1024); } static void agp_i830_deinstall_gatt(device_t dev) { struct agp_i810_softc *sc; unsigned int pgtblctl; sc = device_get_softc(dev); pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgtblctl &= ~1; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl); } static int agp_i810_detach(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); agp_free_cdev(dev); /* Clear the GATT base. */ sc->match->driver->deinstall_gatt(dev); sc->match->driver->chipset_flush_teardown(dev); /* Put the aperture back the way it started. */ AGP_SET_APERTURE(dev, sc->initial_aperture); free(sc->gatt, M_AGP); bus_release_resources(dev, sc->match->driver->res_spec, sc->sc_res); agp_free_res(dev); return (0); } static int agp_i810_resume(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); AGP_SET_APERTURE(dev, sc->initial_aperture); /* Install the GATT. */ bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, sc->gatt->ag_physical | 1); return (bus_generic_resume(dev)); } /** * Sets the PCI resource size of the aperture on i830-class and below chipsets, * while returning failure on later chipsets when an actual change is * requested. * * This whole function is likely bogus, as the kernel would probably need to * reconfigure the placement of the AGP aperture if a larger size is requested, * which doesn't happen currently. */ static int agp_i810_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; u_int16_t miscc; sc = device_get_softc(dev); /* * Double check for sanity. 
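 *
 * The i810 MISCC window-size field can only encode two aperture
 * sizes, so anything other than 32MB or 64MB is rejected.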
*/ if (aperture != 32 * 1024 * 1024 && aperture != 64 * 1024 * 1024) { device_printf(dev, "bad aperture size %d\n", aperture); return (EINVAL); } miscc = pci_read_config(sc->bdev, AGP_I810_MISCC, 2); miscc &= ~AGP_I810_MISCC_WINSIZE; if (aperture == 32 * 1024 * 1024) miscc |= AGP_I810_MISCC_WINSIZE_32; else miscc |= AGP_I810_MISCC_WINSIZE_64; pci_write_config(sc->bdev, AGP_I810_MISCC, miscc, 2); return (0); } static int agp_i830_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; u_int16_t gcc1; sc = device_get_softc(dev); if (aperture != 64 * 1024 * 1024 && aperture != 128 * 1024 * 1024) { device_printf(dev, "bad aperture size %d\n", aperture); return (EINVAL); } gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); gcc1 &= ~AGP_I830_GCC1_GMASIZE; if (aperture == 64 * 1024 * 1024) gcc1 |= AGP_I830_GCC1_GMASIZE_64; else gcc1 |= AGP_I830_GCC1_GMASIZE_128; pci_write_config(sc->bdev, AGP_I830_GCC1, gcc1, 2); return (0); } static int agp_i915_set_aperture(device_t dev, u_int32_t aperture) { return (agp_generic_set_aperture(dev, aperture)); } static int agp_i810_method_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; sc = device_get_softc(dev); return (sc->match->driver->set_aperture(dev, aperture)); } /** * Writes a GTT entry mapping the page at the given offset from the * beginning of the aperture to the given physical address. Setup the * caching mode according to flags. * * For gen 1, 2 and 3, GTT start is located at AGP_I810_GTT offset * from corresponding BAR start. For gen 4, offset is 512KB + * AGP_I810_GTT, for gen 5 and 6 it is 2MB + AGP_I810_GTT. * * Also, the bits of the physical page address above 4GB needs to be * placed into bits 40-32 of PTE. */ static void agp_i810_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_DCACHE_MEMORY) pte |= I810_PTE_LOCAL; else if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; agp_i810_write_gtt(dev, index, pte); } static void agp_i810_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], AGP_I810_GTT + index * 4, pte); CTR2(KTR_AGP_I810, "810_pte %x %x", index, pte); } static void agp_i830_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; agp_i810_write_gtt(dev, index, pte); } static void agp_i915_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_i915_write_gtt(dev, index, pte); } static void agp_i915_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[1], index * 4, pte); CTR2(KTR_AGP_I810, "915_pte %x %x", index, pte); } static void agp_i965_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_i965_write_gtt(dev, index, pte); } static void agp_i965_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); 
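	/*
	 * On the G965 the GTT shares BAR 0 with the MMIO registers:
	 * the first 512KB of the BAR holds registers and the GTT
	 * entries start at the 512KB offset (see the BAR layout
	 * comment above the resource_spec tables).
	 */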
bus_write_4(sc->sc_res[0], index * 4 + (512 * 1024), pte); CTR2(KTR_AGP_I810, "965_pte %x %x", index, pte); } static void agp_g4x_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_g4x_write_gtt(dev, index, pte); } static void agp_g4x_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], index * 4 + (2 * 1024 * 1024), pte); CTR2(KTR_AGP_I810, "g4x_pte %x %x", index, pte); } static int agp_i810_bind_page(device_t dev, vm_offset_t offset, vm_offset_t physical) { struct agp_i810_softc *sc = device_get_softc(dev); u_int index; if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) { device_printf(dev, "failed: offset is 0x%08jx, " "shift is %d, entries is %d\n", (intmax_t)offset, AGP_PAGE_SHIFT, sc->gatt->ag_entries); return (EINVAL); } index = offset >> AGP_PAGE_SHIFT; if (sc->stolen != 0 && index < sc->stolen) { device_printf(dev, "trying to bind into stolen memory\n"); return (EINVAL); } sc->match->driver->install_gtt_pte(dev, index, physical, 0); return (0); } static int agp_i810_unbind_page(device_t dev, vm_offset_t offset) { struct agp_i810_softc *sc; u_int index; sc = device_get_softc(dev); if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) return (EINVAL); index = offset >> AGP_PAGE_SHIFT; if (sc->stolen != 0 && index < sc->stolen) { device_printf(dev, "trying to unbind from stolen memory\n"); return (EINVAL); } sc->match->driver->install_gtt_pte(dev, index, 0, 0); return (0); } static u_int32_t agp_i810_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], AGP_I810_GTT + index * 4); return (pte); } static u_int32_t agp_i915_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[1], index * 4); return (pte); } static u_int32_t agp_i965_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], index * 4 + (512 * 1024)); return (pte); } static u_int32_t agp_g4x_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], index * 4 + (2 * 1024 * 1024)); return (pte); } static vm_paddr_t agp_i810_read_gtt_pte_paddr(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; vm_paddr_t res; sc = device_get_softc(dev); pte = sc->match->driver->read_gtt_pte(dev, index); res = pte & ~PAGE_MASK; return (res); } static vm_paddr_t agp_i915_read_gtt_pte_paddr(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; vm_paddr_t res; sc = device_get_softc(dev); pte = sc->match->driver->read_gtt_pte(dev, index); res = (pte & ~PAGE_MASK) | ((pte & 0xf0) << 28); return (res); } /* * Writing via memory mapped registers already flushes all TLBs. 
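 *
 * This is why agp_i810_flush_tlb() below is deliberately a no-op.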
*/ static void agp_i810_flush_tlb(device_t dev) { } static int agp_i810_enable(device_t dev, u_int32_t mode) { return (0); } static struct agp_memory * agp_i810_alloc_memory(device_t dev, int type, vm_size_t size) { struct agp_i810_softc *sc; struct agp_memory *mem; vm_page_t m; sc = device_get_softc(dev); if ((size & (AGP_PAGE_SIZE - 1)) != 0 || sc->agp.as_allocated + size > sc->agp.as_maxmem) return (0); if (type == 1) { /* * Mapping local DRAM into GATT. */ if (sc->match->driver->chiptype != CHIP_I810) return (0); if (size != sc->dcache_size) return (0); } else if (type == 2) { /* * Type 2 is the contiguous physical memory type, that hands * back a physical address. This is used for cursors on i810. * Hand back as many single pages with physical as the user * wants, but only allow one larger allocation (ARGB cursor) * for simplicity. */ if (size != AGP_PAGE_SIZE) { if (sc->argb_cursor != NULL) return (0); /* Allocate memory for ARGB cursor, if we can. */ sc->argb_cursor = contigmalloc(size, M_AGP, 0, 0, ~0, PAGE_SIZE, 0); if (sc->argb_cursor == NULL) return (0); } } mem = malloc(sizeof *mem, M_AGP, M_WAITOK); mem->am_id = sc->agp.as_nextid++; mem->am_size = size; mem->am_type = type; if (type != 1 && (type != 2 || size == AGP_PAGE_SIZE)) mem->am_obj = vm_object_allocate(OBJT_SWAP, atop(round_page(size))); else mem->am_obj = 0; if (type == 2) { if (size == AGP_PAGE_SIZE) { /* * Allocate and wire down the page now so that we can * get its physical address. */ VM_OBJECT_WLOCK(mem->am_obj); m = vm_page_grab(mem->am_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO); VM_OBJECT_WUNLOCK(mem->am_obj); mem->am_physical = VM_PAGE_TO_PHYS(m); } else { /* Our allocation is already nicely wired down for us. * Just grab the physical address. */ mem->am_physical = vtophys(sc->argb_cursor); } } else mem->am_physical = 0; mem->am_offset = 0; mem->am_is_bound = 0; TAILQ_INSERT_TAIL(&sc->agp.as_memory, mem, am_link); sc->agp.as_allocated += size; return (mem); } static int agp_i810_free_memory(device_t dev, struct agp_memory *mem) { struct agp_i810_softc *sc; vm_page_t m; if (mem->am_is_bound) return (EBUSY); sc = device_get_softc(dev); if (mem->am_type == 2) { if (mem->am_size == AGP_PAGE_SIZE) { /* * Unwire the page which we wired in alloc_memory. */ VM_OBJECT_WLOCK(mem->am_obj); m = vm_page_lookup(mem->am_obj, 0); vm_page_unwire(m, PQ_INACTIVE); VM_OBJECT_WUNLOCK(mem->am_obj); } else { contigfree(sc->argb_cursor, mem->am_size, M_AGP); sc->argb_cursor = NULL; } } sc->agp.as_allocated -= mem->am_size; TAILQ_REMOVE(&sc->agp.as_memory, mem, am_link); if (mem->am_obj) vm_object_deallocate(mem->am_obj); free(mem, M_AGP); return (0); } static int agp_i810_bind_memory(device_t dev, struct agp_memory *mem, vm_offset_t offset) { struct agp_i810_softc *sc; vm_offset_t i; /* Do some sanity checks first. */ if ((offset & (AGP_PAGE_SIZE - 1)) != 0 || offset + mem->am_size > AGP_GET_APERTURE(dev)) { device_printf(dev, "binding memory at bad offset %#x\n", (int)offset); return (EINVAL); } sc = device_get_softc(dev); if (mem->am_type == 2 && mem->am_size != AGP_PAGE_SIZE) { mtx_lock(&sc->agp.as_lock); if (mem->am_is_bound) { mtx_unlock(&sc->agp.as_lock); return (EINVAL); } /* The memory's already wired down, just stick it in the GTT. 
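 * Type 2 allocations are physically contiguous (see
 * agp_i810_alloc_memory() above), so each AGP page is mapped by
 * offsetting from the single base address in am_physical.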
*/ for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, (offset + i) >> AGP_PAGE_SHIFT, mem->am_physical + i, 0); } mem->am_offset = offset; mem->am_is_bound = 1; mtx_unlock(&sc->agp.as_lock); return (0); } if (mem->am_type != 1) return (agp_generic_bind_memory(dev, mem, offset)); /* * Mapping local DRAM into GATT. */ if (sc->match->driver->chiptype != CHIP_I810) return (EINVAL); for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) bus_write_4(sc->sc_res[0], AGP_I810_GTT + (i >> AGP_PAGE_SHIFT) * 4, i | 3); return (0); } static int agp_i810_unbind_memory(device_t dev, struct agp_memory *mem) { struct agp_i810_softc *sc; vm_offset_t i; sc = device_get_softc(dev); if (mem->am_type == 2 && mem->am_size != AGP_PAGE_SIZE) { mtx_lock(&sc->agp.as_lock); if (!mem->am_is_bound) { mtx_unlock(&sc->agp.as_lock); return (EINVAL); } for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, (mem->am_offset + i) >> AGP_PAGE_SHIFT, 0, 0); } mem->am_is_bound = 0; mtx_unlock(&sc->agp.as_lock); return (0); } if (mem->am_type != 1) return (agp_generic_unbind_memory(dev, mem)); if (sc->match->driver->chiptype != CHIP_I810) return (EINVAL); for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, i >> AGP_PAGE_SHIFT, 0, 0); } return (0); } static device_method_t agp_i810_methods[] = { /* Device interface */ DEVMETHOD(device_identify, agp_i810_identify), DEVMETHOD(device_probe, agp_i810_probe), DEVMETHOD(device_attach, agp_i810_attach), DEVMETHOD(device_detach, agp_i810_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, agp_i810_resume), /* AGP interface */ DEVMETHOD(agp_get_aperture, agp_generic_get_aperture), DEVMETHOD(agp_set_aperture, agp_i810_method_set_aperture), DEVMETHOD(agp_bind_page, agp_i810_bind_page), DEVMETHOD(agp_unbind_page, agp_i810_unbind_page), DEVMETHOD(agp_flush_tlb, agp_i810_flush_tlb), DEVMETHOD(agp_enable, agp_i810_enable), DEVMETHOD(agp_alloc_memory, agp_i810_alloc_memory), DEVMETHOD(agp_free_memory, agp_i810_free_memory), DEVMETHOD(agp_bind_memory, agp_i810_bind_memory), DEVMETHOD(agp_unbind_memory, agp_i810_unbind_memory), DEVMETHOD(agp_chipset_flush, agp_intel_gtt_chipset_flush), { 0, 0 } }; static driver_t agp_i810_driver = { "agp", agp_i810_methods, sizeof(struct agp_i810_softc), }; DRIVER_MODULE(agp_i810, vgapci, agp_i810_driver, 0, 0); MODULE_DEPEND(agp_i810, agp, 1, 1, 1); MODULE_DEPEND(agp_i810, pci, 1, 1, 1); void agp_intel_gtt_clear_range(device_t dev, u_int first_entry, u_int num_entries) { struct agp_i810_softc *sc; u_int i; sc = device_get_softc(dev); for (i = 0; i < num_entries; i++) sc->match->driver->install_gtt_pte(dev, first_entry + i, VM_PAGE_TO_PHYS(bogus_page), 0); sc->match->driver->read_gtt_pte(dev, first_entry + num_entries - 1); } void agp_intel_gtt_insert_pages(device_t dev, u_int first_entry, u_int num_entries, vm_page_t *pages, u_int flags) { struct agp_i810_softc *sc; u_int i; sc = device_get_softc(dev); for (i = 0; i < num_entries; i++) { MPASS(pages[i]->valid == VM_PAGE_BITS_ALL); MPASS(pages[i]->ref_count > 0); sc->match->driver->install_gtt_pte(dev, first_entry + i, VM_PAGE_TO_PHYS(pages[i]), flags); } sc->match->driver->read_gtt_pte(dev, first_entry + num_entries - 1); } struct intel_gtt agp_intel_gtt_get(device_t dev) { struct agp_i810_softc *sc; struct intel_gtt res; sc = device_get_softc(dev); res.stolen_size = sc->stolen_size; res.gtt_total_entries = sc->gtt_total_entries; res.gtt_mappable_entries = 
sc->gtt_mappable_entries;
	res.do_idle_maps = 0;
	res.scratch_page_dma = VM_PAGE_TO_PHYS(bogus_page);
	if (sc->agp.as_aperture != NULL)
		res.gma_bus_addr = rman_get_start(sc->agp.as_aperture);
	else
		res.gma_bus_addr = 0;
	return (res);
}

static int
agp_i810_chipset_flush_setup(device_t dev)
{

	return (0);
}

static void
agp_i810_chipset_flush_teardown(device_t dev)
{

	/* Nothing to do. */
}

static void
agp_i810_chipset_flush(device_t dev)
{

	/* Nothing to do. */
}

static void
agp_i830_chipset_flush(device_t dev)
{
	struct agp_i810_softc *sc;
	uint32_t hic;
	int i;

	sc = device_get_softc(dev);
	pmap_invalidate_cache();
	hic = bus_read_4(sc->sc_res[0], AGP_I830_HIC);
	bus_write_4(sc->sc_res[0], AGP_I830_HIC, hic | (1U << 31));
	for (i = 0; i < 20000 /* 1 sec */; i++) {
		hic = bus_read_4(sc->sc_res[0], AGP_I830_HIC);
		if ((hic & (1U << 31)) == 0)
			break;
		DELAY(50);
	}
}

static int
agp_i915_chipset_flush_alloc_page(device_t dev, uint64_t start, uint64_t end)
{
	struct agp_i810_softc *sc;
	device_t vga;

	sc = device_get_softc(dev);
	vga = device_get_parent(dev);
	sc->sc_flush_page_rid = 100;
	sc->sc_flush_page_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev, SYS_RES_MEMORY, &sc->sc_flush_page_rid, start, end,
	    PAGE_SIZE, RF_ACTIVE);
	if (sc->sc_flush_page_res == NULL) {
		device_printf(dev, "Failed to allocate flush page at 0x%jx\n",
		    (uintmax_t)start);
		return (EINVAL);
	}
	sc->sc_flush_page_vaddr = rman_get_virtual(sc->sc_flush_page_res);
	if (bootverbose) {
		device_printf(dev, "Allocated flush page phys 0x%jx virt %p\n",
		    (uintmax_t)rman_get_start(sc->sc_flush_page_res),
		    sc->sc_flush_page_vaddr);
	}
	return (0);
}

static void
agp_i915_chipset_flush_free_page(device_t dev)
{
	struct agp_i810_softc *sc;
	device_t vga;

	sc = device_get_softc(dev);
	vga = device_get_parent(dev);
	if (sc->sc_flush_page_res == NULL)
		return;
-	BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev, SYS_RES_MEMORY,
-	    sc->sc_flush_page_rid, sc->sc_flush_page_res);
+	BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev,
+	    sc->sc_flush_page_res);
	BUS_RELEASE_RESOURCE(device_get_parent(vga), dev, SYS_RES_MEMORY,
	    sc->sc_flush_page_rid, sc->sc_flush_page_res);
}

static int
agp_i915_chipset_flush_setup(device_t dev)
{
	struct agp_i810_softc *sc;
	uint32_t temp;
	int error;

	sc = device_get_softc(dev);
	temp = pci_read_config(sc->bdev, AGP_I915_IFPADDR, 4);
	if ((temp & 1) != 0) {
		temp &= ~1;
		if (bootverbose)
			device_printf(dev,
			    "Found already configured flush page at 0x%jx\n",
			    (uintmax_t)temp);
		sc->sc_bios_allocated_flush_page = 1;
		/*
		 * In the case BIOS initialized the flush pointer (?)
		 * register, expect that BIOS also set up the resource
		 * for the page.
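		 *
		 * The flush page is the whole mechanism here: storing
		 * any value to it (agp_i915_chipset_flush() below
		 * writes a 1) is what prompts the chipset to flush its
		 * internal write buffers.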
*/ error = agp_i915_chipset_flush_alloc_page(dev, temp, temp + PAGE_SIZE - 1); if (error != 0) return (error); } else { sc->sc_bios_allocated_flush_page = 0; error = agp_i915_chipset_flush_alloc_page(dev, 0, 0xffffffff); if (error != 0) return (error); temp = rman_get_start(sc->sc_flush_page_res); pci_write_config(sc->bdev, AGP_I915_IFPADDR, temp | 1, 4); } return (0); } static void agp_i915_chipset_flush_teardown(device_t dev) { struct agp_i810_softc *sc; uint32_t temp; sc = device_get_softc(dev); if (sc->sc_flush_page_res == NULL) return; if (!sc->sc_bios_allocated_flush_page) { temp = pci_read_config(sc->bdev, AGP_I915_IFPADDR, 4); temp &= ~1; pci_write_config(sc->bdev, AGP_I915_IFPADDR, temp, 4); } agp_i915_chipset_flush_free_page(dev); } static int agp_i965_chipset_flush_setup(device_t dev) { struct agp_i810_softc *sc; uint64_t temp; uint32_t temp_hi, temp_lo; int error; sc = device_get_softc(dev); temp_hi = pci_read_config(sc->bdev, AGP_I965_IFPADDR + 4, 4); temp_lo = pci_read_config(sc->bdev, AGP_I965_IFPADDR, 4); if ((temp_lo & 1) != 0) { temp = ((uint64_t)temp_hi << 32) | (temp_lo & ~1); if (bootverbose) device_printf(dev, "Found already configured flush page at 0x%jx\n", (uintmax_t)temp); sc->sc_bios_allocated_flush_page = 1; /* * In the case BIOS initialized the flush pointer (?) * register, expect that BIOS also set up the resource * for the page. */ error = agp_i915_chipset_flush_alloc_page(dev, temp, temp + PAGE_SIZE - 1); if (error != 0) return (error); } else { sc->sc_bios_allocated_flush_page = 0; error = agp_i915_chipset_flush_alloc_page(dev, 0, ~0); if (error != 0) return (error); temp = rman_get_start(sc->sc_flush_page_res); pci_write_config(sc->bdev, AGP_I965_IFPADDR + 4, (temp >> 32) & UINT32_MAX, 4); pci_write_config(sc->bdev, AGP_I965_IFPADDR, (temp & UINT32_MAX) | 1, 4); } return (0); } static void agp_i965_chipset_flush_teardown(device_t dev) { struct agp_i810_softc *sc; uint32_t temp_lo; sc = device_get_softc(dev); if (sc->sc_flush_page_res == NULL) return; if (!sc->sc_bios_allocated_flush_page) { temp_lo = pci_read_config(sc->bdev, AGP_I965_IFPADDR, 4); temp_lo &= ~1; pci_write_config(sc->bdev, AGP_I965_IFPADDR, temp_lo, 4); } agp_i915_chipset_flush_free_page(dev); } static void agp_i915_chipset_flush(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); *(uint32_t *)sc->sc_flush_page_vaddr = 1; } int agp_intel_gtt_chipset_flush(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->match->driver->chipset_flush(dev); return (0); } void agp_intel_gtt_unmap_memory(device_t dev, struct sglist *sg_list) { } int agp_intel_gtt_map_memory(device_t dev, vm_page_t *pages, u_int num_entries, struct sglist **sg_list) { #if 0 struct agp_i810_softc *sc; #endif struct sglist *sg; int i; #if 0 int error; bus_dma_tag_t dmat; #endif if (*sg_list != NULL) return (0); #if 0 sc = device_get_softc(dev); #endif sg = sglist_alloc(num_entries, M_WAITOK /* XXXKIB */); for (i = 0; i < num_entries; i++) { sg->sg_segs[i].ss_paddr = VM_PAGE_TO_PHYS(pages[i]); sg->sg_segs[i].ss_len = PAGE_SIZE; } #if 0 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1 /* alignment */, 0 /* boundary */, 1ULL << sc->match->busdma_addr_mask_sz /* lowaddr */, BUS_SPACE_MAXADDR /* highaddr */, NULL /* filtfunc */, NULL /* filtfuncarg */, BUS_SPACE_MAXADDR /* maxsize */, BUS_SPACE_UNRESTRICTED /* nsegments */, BUS_SPACE_MAXADDR /* maxsegsz */, 0 /* flags */, NULL /* lockfunc */, NULL /* lockfuncarg */, &dmat); if (error != 0) { sglist_free(sg); return (error); } /* XXXKIB */ 
#endif *sg_list = sg; return (0); } static void agp_intel_gtt_install_pte(device_t dev, u_int index, vm_paddr_t addr, u_int flags) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->match->driver->install_gtt_pte(dev, index, addr, flags); } void agp_intel_gtt_insert_sg_entries(device_t dev, struct sglist *sg_list, u_int first_entry, u_int flags) { struct agp_i810_softc *sc; vm_paddr_t spaddr; size_t slen; u_int i, j; sc = device_get_softc(dev); for (i = j = 0; j < sg_list->sg_nseg; j++) { spaddr = sg_list->sg_segs[i].ss_paddr; slen = sg_list->sg_segs[i].ss_len; for (; slen > 0; i++) { sc->match->driver->install_gtt_pte(dev, first_entry + i, spaddr, flags); spaddr += AGP_PAGE_SIZE; slen -= AGP_PAGE_SIZE; } } sc->match->driver->read_gtt_pte(dev, first_entry + i - 1); } void intel_gtt_clear_range(u_int first_entry, u_int num_entries) { agp_intel_gtt_clear_range(intel_agp, first_entry, num_entries); } void intel_gtt_insert_pages(u_int first_entry, u_int num_entries, vm_page_t *pages, u_int flags) { agp_intel_gtt_insert_pages(intel_agp, first_entry, num_entries, pages, flags); } struct intel_gtt * intel_gtt_get(void) { intel_private.base = agp_intel_gtt_get(intel_agp); return (&intel_private.base); } int intel_gtt_chipset_flush(void) { return (agp_intel_gtt_chipset_flush(intel_agp)); } void intel_gtt_unmap_memory(struct sglist *sg_list) { agp_intel_gtt_unmap_memory(intel_agp, sg_list); } int intel_gtt_map_memory(vm_page_t *pages, u_int num_entries, struct sglist **sg_list) { return (agp_intel_gtt_map_memory(intel_agp, pages, num_entries, sg_list)); } void intel_gtt_insert_sg_entries(struct sglist *sg_list, u_int first_entry, u_int flags) { agp_intel_gtt_insert_sg_entries(intel_agp, sg_list, first_entry, flags); } void intel_gtt_install_pte(u_int index, vm_paddr_t addr, u_int flags) { agp_intel_gtt_install_pte(intel_agp, index, addr, flags); } device_t intel_gtt_get_bridge_device(void) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->bdev); } vm_paddr_t intel_gtt_read_pte_paddr(u_int entry) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->read_gtt_pte_paddr(intel_agp, entry)); } u_int32_t intel_gtt_read_pte(u_int entry) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->read_gtt_pte(intel_agp, entry)); } void intel_gtt_write(u_int entry, uint32_t val) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->write_gtt(intel_agp, entry, val)); } diff --git a/sys/dev/bhnd/bhndb/bhndb.c b/sys/dev/bhnd/bhndb/bhndb.c index f8a1467894d1..5148c1c8452b 100644 --- a/sys/dev/bhnd/bhndb/bhndb.c +++ b/sys/dev/bhnd/bhndb/bhndb.c @@ -1,2309 +1,2301 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Abstract BHND Bridge Device Driver * * Provides generic support for bridging from a parent bus (such as PCI) to * a BHND-compatible bus (e.g. bcma or siba). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhndbvar.h" #include "bhndb_bus_if.h" #include "bhndb_hwdata.h" #include "bhndb_private.h" /* Debugging flags */ static u_long bhndb_debug = 0; TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug); enum { BHNDB_DEBUG_PRIO = 1 << 0, }; #define BHNDB_DEBUG(_type) (BHNDB_DEBUG_ ## _type & bhndb_debug) static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw); static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *r, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table); static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw); bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child); static struct rman *bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type); static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size); static int bhndb_activate_static_region( struct bhndb_softc *sc, struct bhndb_region *region, - device_t child, int type, int rid, - struct resource *r); + device_t child, struct resource *r); static int bhndb_try_activate_resource( struct bhndb_softc *sc, device_t child, - int type, int rid, struct resource *r, - bool *indirect); + struct resource *r, bool *indirect); static inline struct bhndb_dw_alloc *bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore); /** * Default bhndb(4) implementation of DEVICE_PROBE(). * * This function provides the default bhndb implementation of DEVICE_PROBE(), * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge(). 
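 *
 * A subclass bridge driver would normally reference this implementation
 * directly from its method table; a minimal sketch (driver name
 * hypothetical):
 *
 *	static device_method_t bhndb_foo_methods[] = {
 *		DEVMETHOD(device_probe,		bhndb_generic_probe),
 *		DEVMETHOD(device_attach,	bhndb_foo_attach),
 *		DEVMETHOD_END
 *	};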
*/ int bhndb_generic_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static void bhndb_probe_nomatch(device_t dev, device_t child) { const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> (no driver attached)\n", name); } static int bhndb_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int bhndb_child_location(device_t dev, device_t child, struct sbuf *sb) { struct bhndb_softc *sc; sc = device_get_softc(dev); sbuf_printf(sb, "base=0x%llx", (unsigned long long) sc->chipid.enum_addr); return (0); } /** * Return true if @p cores matches the @p hw specification. * * @param sc BHNDB device state. * @param cores A device table to match against. * @param ncores The number of cores in @p cores. * @param hw The hardware description to be matched against. */ static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw) { for (u_int i = 0; i < hw->num_hw_reqs; i++) { const struct bhnd_core_match *match; bool found; match = &hw->hw_reqs[i]; found = false; for (u_int d = 0; d < ncores; d++) { struct bhnd_core_info *core = &cores[d]; if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; if (!bhnd_core_matches(core, match)) continue; found = true; break; } if (!found) return (false); } return (true); } /** * Initialize the region maps and priority configuration in @p br using * the priority @p table and the set of cores enumerated by @p erom. * * @param sc The bhndb device state. * @param br The resource state to be configured. * @param erom EROM parser used to enumerate @p cores. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param table Hardware priority table to be used to determine the relative * priorities of per-core port resources. */ static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *br, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table) { const struct bhndb_hw_priority *hp; bhnd_addr_t addr; bhnd_size_t size; size_t prio_low, prio_default, prio_high; int error; /* The number of port regions per priority band that must be accessible * via dynamic register windows */ prio_low = 0; prio_default = 0; prio_high = 0; /* * Register bridge regions covering all statically mapped ports. */ for (u_int i = 0; i < ncores; i++) { const struct bhndb_regwin *regw; struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); for (regw = br->cfg->register_windows; regw->win_type != BHNDB_REGWIN_T_INVALID; regw++) { const struct bhndb_port_priority *pp; uint32_t alloc_flags; /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) continue; /* Skip non-matching cores. 
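			 * A BHNDB_REGWIN_T_CORE window maps only the
			 * specific core it names; cores without a static
			 * window fall through to dynamic window allocation
			 * in the second pass below.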
*/ if (!bhndb_regwin_match_core(regw, core)) continue; /* Fetch the base address of the mapped port */ error = bhnd_erom_lookup_core_addr(erom, &md, regw->d.core.port_type, regw->d.core.port, regw->d.core.region, NULL, &addr, &size); if (error) { /* Skip non-applicable register windows */ if (error == ENOENT) continue; return (error); } /* * Apply the register window's region offset, if any. */ if (regw->d.core.offset > size) { device_printf(sc->dev, "invalid register " "window offset %#jx for region %#jx+%#jx\n", regw->d.core.offset, addr, size); return (EINVAL); } addr += regw->d.core.offset; /* * Always defer to the register window's size. * * If the port size is smaller than the window size, * this ensures that we fully utilize register windows * larger than the referenced port. * * If the port size is larger than the window size, this * ensures that we do not directly map the allocations * within the region to a too-small window. */ size = regw->win_size; /* Fetch allocation flags from the corresponding port * priority entry, if any */ pp = bhndb_hw_priorty_find_port(table, core, regw->d.core.port_type, regw->d.core.port, regw->d.core.region); if (pp != NULL) { alloc_flags = pp->alloc_flags; } else { alloc_flags = 0; } /* * Add to the bus region list. * * The window priority for a statically mapped region is * always HIGH. */ error = bhndb_add_resource_region(br, addr, size, BHNDB_PRIORITY_HIGH, alloc_flags, regw); if (error) return (error); } } /* * Perform priority accounting and register bridge regions for all * ports defined in the priority table */ for (u_int i = 0; i < ncores; i++) { struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); /* * Skip priority accounting for cores that ... */ /* ... do not require bridge resources */ if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; /* ... do not have a priority table entry */ hp = bhndb_hw_priority_find_core(table, core); if (hp == NULL) continue; /* ... are explicitly disabled in the priority table. */ if (hp->priority == BHNDB_PRIORITY_NONE) continue; /* Determine the number of dynamic windows required and * register their bus_region entries. */ for (u_int i = 0; i < hp->num_ports; i++) { const struct bhndb_port_priority *pp; pp = &hp->ports[i]; /* Fetch the address+size of the mapped port. 
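			 * An ENOENT result from the EROM parser means the
			 * port is simply not defined on this device; the
			 * priority entry is skipped rather than treated as
			 * an error.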
*/ error = bhnd_erom_lookup_core_addr(erom, &md, pp->type, pp->port, pp->region, NULL, &addr, &size); if (error) { /* Skip ports not defined on this device */ if (error == ENOENT) continue; return (error); } /* Skip ports with an existing static mapping */ if (bhndb_has_static_region_mapping(br, addr, size)) continue; /* Define a dynamic region for this port */ error = bhndb_add_resource_region(br, addr, size, pp->priority, pp->alloc_flags, NULL); if (error) return (error); /* Update port mapping counts */ switch (pp->priority) { case BHNDB_PRIORITY_NONE: break; case BHNDB_PRIORITY_LOW: prio_low++; break; case BHNDB_PRIORITY_DEFAULT: prio_default++; break; case BHNDB_PRIORITY_HIGH: prio_high++; break; } } } /* Determine the minimum priority at which we'll allocate direct * register windows from our dynamic pool */ size_t prio_total = prio_low + prio_default + prio_high; if (prio_total <= br->dwa_count) { /* low+default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_LOW; } else if (prio_default + prio_high <= br->dwa_count) { /* default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_DEFAULT; } else { /* high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_HIGH; } if (BHNDB_DEBUG(PRIO)) { struct bhndb_region *region; const char *direct_msg, *type_msg; bhndb_priority_t prio, prio_min; uint32_t flags; prio_min = br->min_prio; device_printf(sc->dev, "min_prio: %d\n", prio_min); STAILQ_FOREACH(region, &br->bus_regions, link) { prio = region->priority; flags = region->alloc_flags; direct_msg = prio >= prio_min ? "direct" : "indirect"; type_msg = region->static_regwin ? "static" : "dynamic"; device_printf(sc->dev, "region 0x%llx+0x%llx priority " "%u %s/%s", (unsigned long long) region->addr, (unsigned long long) region->size, region->priority, direct_msg, type_msg); if (flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) printf(" [overcommit]\n"); else printf("\n"); } } return (0); } /** * Find a hardware specification for @p dev. * * @param sc The bhndb device state. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param[out] hw On success, set to the hardware specification matched * with @p dev. * * @retval 0 success * @retval non-zero if an error occurs fetching device info for comparison. */ static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw) { const struct bhndb_hw *next, *hw_table; /* Search for the first matching hardware config. */ hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev); for (next = hw_table; next->hw_reqs != NULL; next++) { if (!bhndb_hw_matches(sc, cores, ncores, next)) continue; /* Found */ *hw = next; return (0); } return (ENOENT); } /** * Helper function that must be called by subclass bhndb(4) drivers * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4) * APIs on the bridge device. * * This function will add a bridged bhnd(4) child device with a device order of * BHND_PROBE_BUS. Any subclass bhndb(4) driver may use the BHND_PROBE_* * priority bands to add additional devices that will be attached in * their preferred order relative to the bridged bhnd(4) bus. * * @param dev The bridge device to attach. * @param cid The bridged device's chip identification. * @param cores The bridged device's core table. * @param ncores The number of cores in @p cores. * @param bridge_core Core info for the bhnd(4) core serving as the host * bridge.
* @param erom_class An erom parser class that may be used to parse * the bridged device's device enumeration table. */ int bhndb_attach(device_t dev, struct bhnd_chipid *cid, struct bhnd_core_info *cores, u_int ncores, struct bhnd_core_info *bridge_core, bhnd_erom_class_t *erom_class) { struct bhndb_devinfo *dinfo; struct bhndb_softc *sc; const struct bhndb_hw *hw; const struct bhndb_hwcfg *hwcfg; const struct bhndb_hw_priority *hwprio; struct bhnd_erom_io *eio; bhnd_erom_t *erom; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent_dev = device_get_parent(dev); sc->bridge_core = *bridge_core; sc->chipid = *cid; if ((error = bhnd_service_registry_init(&sc->services))) return (error); BHNDB_LOCK_INIT(sc); erom = NULL; /* Find a matching bridge hardware configuration */ if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) { device_printf(sc->dev, "unable to identify device, " " using generic bridge resource definitions\n"); hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, dev); hw = NULL; } else { hwcfg = hw->cfg; } if (hw != NULL && (bootverbose || BHNDB_DEBUG(PRIO))) { device_printf(sc->dev, "%s resource configuration\n", hw->name); } /* Allocate bridge resource state using the discovered hardware * configuration */ sc->bus_res = bhndb_alloc_resources(sc->dev, sc->parent_dev, hwcfg); if (sc->bus_res == NULL) { device_printf(sc->dev, "failed to allocate bridge resource " "state\n"); error = ENOMEM; goto failed; } /* Add our bridged bus device */ sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", -1); if (sc->bus_dev == NULL) { error = ENXIO; goto failed; } dinfo = device_get_ivars(sc->bus_dev); dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED; /* We can now use bhndb to perform bridging of SYS_RES_MEMORY resources; * we use this to instantiate an erom parser instance */ eio = bhnd_erom_iores_new(sc->bus_dev, 0); if ((erom = bhnd_erom_alloc(erom_class, cid, eio)) == NULL) { bhnd_erom_io_fini(eio); error = ENXIO; goto failed; } /* Populate our resource priority configuration */ hwprio = BHNDB_BUS_GET_HARDWARE_PRIO(sc->parent_dev, sc->dev); error = bhndb_init_region_cfg(sc, erom, sc->bus_res, cores, ncores, hwprio); if (error) { device_printf(sc->dev, "failed to initialize resource " "priority configuration: %d\n", error); goto failed; } /* Free our erom instance */ bhnd_erom_free(erom); erom = NULL; return (0); failed: BHNDB_LOCK_DESTROY(sc); if (sc->bus_res != NULL) bhndb_free_resources(sc->bus_res); if (erom != NULL) bhnd_erom_free(erom); bhnd_service_registry_fini(&sc->services); return (error); } /** * Default bhndb(4) implementation of DEVICE_DETACH(). * * This function detaches any child devices, and if successful, releases all * resources held by the bridge device. */ int bhndb_generic_detach(device_t dev) { struct bhndb_softc *sc; int error; sc = device_get_softc(dev); /* Detach children */ if ((error = bus_generic_detach(dev))) return (error); /* Delete children */ if ((error = device_delete_children(dev))) return (error); /* Clean up our service registry */ if ((error = bhnd_service_registry_fini(&sc->services))) return (error); /* Clean up our driver state. */ bhndb_free_resources(sc->bus_res); BHNDB_LOCK_DESTROY(sc); return (0); } /** * Default bhndb(4) implementation of DEVICE_SUSPEND(). * * This function calls bus_generic_suspend() (or implements equivalent * behavior). */ int bhndb_generic_suspend(device_t dev) { return (bus_generic_suspend(dev)); } /** * Default bhndb(4) implementation of DEVICE_RESUME(). 
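 *
 * Before any child devices are resumed, each in-use dynamic register
 * window is re-pointed at its previously configured target (see the
 * restore loop below); otherwise a child's first MMIO access after
 * resume could land at a stale backplane address.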
* * This function calls bus_generic_resume() (or implements equivalent * behavior). */ int bhndb_generic_resume(device_t dev) { struct bhndb_softc *sc; struct bhndb_resources *bus_res; struct bhndb_dw_alloc *dwa; int error; sc = device_get_softc(dev); bus_res = sc->bus_res; /* Guarantee that all in-use dynamic register windows are mapped to * their previously configured target address. */ BHNDB_LOCK(sc); error = 0; for (size_t i = 0; i < bus_res->dwa_count; i++) { dwa = &bus_res->dw_alloc[i]; /* Skip regions that were not previously used */ if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0) continue; /* Otherwise, ensure the register window is correct before * any children attempt MMIO */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) break; } BHNDB_UNLOCK(sc); /* Error restoring hardware state; children cannot be safely resumed */ if (error) { device_printf(dev, "Unable to restore hardware configuration; " "cannot resume: %d\n", error); return (error); } return (bus_generic_resume(dev)); } /** * Default implementation of BHNDB_SUSPEND_RESOURCE. */ static void bhndb_suspend_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; struct bhndb_dw_alloc *dwa; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return; BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa == NULL) { BHNDB_UNLOCK(sc); return; } if (BHNDB_DEBUG(PRIO)) device_printf(child, "suspend resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); /* Release the resource's window reference */ bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } /** * Default implementation of BHNDB_RESUME_RESOURCE. */ static int bhndb_resume_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return (0); /* Inactive resources don't require reallocation of bridge resources */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (0); if (BHNDB_DEBUG(PRIO)) device_printf(child, "resume resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); - return (bhndb_try_activate_resource(sc, rman_get_device(r), type, - rman_get_rid(r), r, NULL)); + return (bhndb_try_activate_resource(sc, rman_get_device(r), r, NULL)); } /** * Default bhndb(4) implementation of BUS_READ_IVAR(). */ static int bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } /** * Default bhndb(4) implementation of BUS_WRITE_IVAR(). */ static int bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * Return the address space for the given @p child device. */ bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child) { struct bhndb_devinfo *dinfo; device_t imd_dev; /* Find the directly attached parent of the requesting device */ imd_dev = child; while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev) imd_dev = device_get_parent(imd_dev); if (imd_dev == NULL) panic("bhndb address space request for non-child device %s\n", device_get_nameunit(child)); dinfo = device_get_ivars(imd_dev); return (dinfo->addrspace); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The bhndb device state. * @param child The requesting child. * @param type The resource type (e.g. 
SYS_RES_MEMORY, SYS_RES_IRQ, ...) */ static struct rman * bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) { switch (bhndb_get_addrspace(sc, child)) { case BHNDB_ADDRSPACE_NATIVE: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->ht_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } case BHNDB_ADDRSPACE_BRIDGED: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->br_mem_rman); case SYS_RES_IRQ: return (&sc->bus_res->br_irq_rman); default: return (NULL); } } /* Quieten gcc */ return (NULL); } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_CORE_DISABLED(). */ static bool bhndb_is_core_disabled(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(core))) return (!bhnd_cores_equal(core, &sc->bridge_core)); /* Assume the core is populated */ return (false); } /** * Default bhndb(4) implementation of BHNDB_GET_HOSTB_CORE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices. */ static int bhndb_get_hostb_core(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc = device_get_softc(dev); *core = sc->bridge_core; return (0); } /** * Default bhndb(4) implementation of BHND_BUS_GET_SERVICE_REGISTRY(). */ static struct bhnd_service_registry * bhndb_get_service_registry(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->services); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { /* Delegate to our parent device's bus; the requested * resource type isn't handled locally. 
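		 * For example, a SYS_RES_IOPORT request has no
		 * bhndb-managed rman and is forwarded to the host bus.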
*/ return (BUS_ALLOC_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, start, end, count, flags)); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || count > ((end - start) + 1)) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); rman_set_type(rv, type); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int bhndb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_RELEASE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { - error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r); + error = BUS_DEACTIVATE_RESOURCE(dev, child, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; } return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). */ static int bhndb_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct bhndb_softc *sc; struct rman *rm; rman_res_t mstart, mend; int error; sc = device_get_softc(dev); error = 0; /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. 
*/ rm = bhndb_get_rman(sc, child, rman_get_type(r)); if (rm == NULL) { return (BUS_ADJUST_RESOURCE(device_get_parent(sc->parent_dev), child, r, start, end)); } /* Verify basic constraints */ if (end <= start) return (EINVAL); if (!rman_is_region_manager(r, rm)) return (ENXIO); BHNDB_LOCK(sc); /* If not active, allow any range permitted by the resource manager */ if (!(rman_get_flags(r) & RF_ACTIVE)) goto done; /* Otherwise, the range is limited by the bridged resource mapping */ error = bhndb_find_resource_limits(sc->bus_res, r, &mstart, &mend); if (error) goto done; if (start < mstart || end > mend) { error = EINVAL; goto done; } /* Fall through */ done: if (!error) error = rman_adjust_resource(r, start, end); BHNDB_UNLOCK(sc); return (error); } /** * Initialize child resource @p r with a virtual address, tag, and handle * copied from @p parent, adjusted to contain only the range defined by * @p offset and @p size. * * @param r The resource to be initialized. * @param parent The parent bus resource that fully contains the subregion. * @param offset The subregion offset within @p parent. * @param size The subregion size. */ static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size) { bus_space_handle_t bh, child_bh; bus_space_tag_t bt; uintptr_t vaddr; int error; /* Fetch the parent resource's real bus values */ vaddr = (uintptr_t) rman_get_virtual(parent); bt = rman_get_bustag(parent); bh = rman_get_bushandle(parent); /* Configure child resource with window-adjusted real bus values */ vaddr += offset; error = bus_space_subregion(bt, bh, offset, size, &child_bh); if (error) return (error); rman_set_virtual(r, (void *) vaddr); rman_set_bustag(r, bt); rman_set_bushandle(r, child_bh); return (0); } /** * Attempt activation of a fixed register window mapping for @p child. * * @param sc BHNDB device state. * @param region The static region definition capable of mapping @p r. * @param child A child requesting resource activation. - * @param type Resource type. - * @param rid Resource identifier. * @param r Resource to be activated. * * @retval 0 if @p r was activated successfully * @retval ENOENT if no fixed register window was found. * @retval non-zero if @p r could not be activated. */ static int bhndb_activate_static_region(struct bhndb_softc *sc, - struct bhndb_region *region, device_t child, int type, int rid, - struct resource *r) + struct bhndb_region *region, device_t child, struct resource *r) { struct resource *bridge_res; const struct bhndb_regwin *win; bhnd_size_t parent_offset; rman_res_t r_start, r_size; int error; win = region->static_regwin; KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type), ("can't activate non-static region")); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Find the corresponding bridge resource */ bridge_res = bhndb_host_resource_for_regwin(sc->bus_res->res, win); if (bridge_res == NULL) return (ENXIO); /* Calculate subregion offset within the parent resource */ parent_offset = r_start - region->addr; parent_offset += win->win_offset; /* Configure resource with its real bus values. */ error = bhndb_init_child_resource(r, bridge_res, parent_offset, r_size); if (error) return (error); /* Mark active */ if ((error = rman_activate_resource(r))) return (error); return (0); } /** * Attempt to allocate/retain a dynamic register window for @p r, returning * the retained window. * * @param sc The bhndb driver state. * @param r The resource for which a window will be retained.
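 *
 * Must be called with the bridge state lock held.  A successful retain
 * must eventually be balanced by bhndb_dw_release().  A condensed caller
 * sketch, which on failure falls back to indirect mapping (see
 * bhndb_try_activate_resource() below for the real usage):
 *
 *	BHNDB_LOCK(sc);
 *	dwa = bhndb_retain_dynamic_window(sc, r);
 *	BHNDB_UNLOCK(sc);
 *	if (dwa == NULL)
 *		return (ENOMEM);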
*/ static struct bhndb_dw_alloc * bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r) { struct bhndb_dw_alloc *dwa; rman_res_t r_start, r_size; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Look for an existing dynamic window we can reference */ dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size); if (dwa != NULL) { if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0) return (dwa); return (NULL); } /* Otherwise, try to reserve a free window */ dwa = bhndb_dw_next_free(sc->bus_res); if (dwa == NULL) { /* No free windows */ return (NULL); } /* Window must be large enough to map the entire resource */ if (dwa->win->win_size < rman_get_size(r)) return (NULL); /* Set the window target */ error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, rman_get_start(r), rman_get_size(r)); if (error) { device_printf(sc->dev, "dynamic window initialization " "for 0x%llx-0x%llx failed: %d\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, error); return (NULL); } /* Add our reservation */ if (bhndb_dw_retain(sc->bus_res, dwa, r)) return (NULL); return (dwa); } /** * Activate a resource using any viable static or dynamic register window. * * @param sc The bhndb driver state. * @param child The child holding ownership of @p r. - * @param type The type of the resource to be activated. - * @param rid The resource ID of @p r. * @param r The resource to be activated * @param[out] indirect On error and if not NULL, will be set to 'true' if * the caller should instead use an indirect resource mapping. * * @retval 0 success * @retval non-zero activation failed. */ static int -bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, int type, - int rid, struct resource *r, bool *indirect) +bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, + struct resource *r, bool *indirect) { struct bhndb_region *region; struct bhndb_dw_alloc *dwa; bhndb_priority_t dw_priority; rman_res_t r_start, r_size; rman_res_t parent_offset; - int error; + int error, type; BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED); if (indirect != NULL) *indirect = false; + type = rman_get_type(r); switch (type) { case SYS_RES_IRQ: /* IRQ resources are always directly mapped */ return (rman_activate_resource(r)); case SYS_RES_MEMORY: /* Handled below */ break; default: device_printf(sc->dev, "unsupported resource type %d\n", type); return (ENXIO); } /* Only MMIO resources can be mapped via register windows */ KASSERT(type == SYS_RES_MEMORY, ("invalid type: %d", type)); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Activate native addrspace resources using the host address space */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) { struct resource *parent; /* Find the bridge resource referenced by the child */ parent = bhndb_host_resource_for_range(sc->bus_res->res, type, r_start, r_size); if (parent == NULL) { device_printf(sc->dev, "host resource not found " "for 0x%llx-0x%llx\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (ENOENT); } /* Initialize child resource with the real bus values */ error = bhndb_init_child_resource(r, parent, r_start - rman_get_start(parent), r_size); if (error) return (error); /* Try to activate child resource */ return (rman_activate_resource(r)); } /* Default to low priority */ dw_priority = BHNDB_PRIORITY_LOW; /* Look for a bus region matching the resource's address range */ region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if 
(region != NULL) dw_priority = region->priority; /* Prefer static mappings over consuming a dynamic window. */ if (region && region->static_regwin) { - error = bhndb_activate_static_region(sc, region, child, type, - rid, r); + error = bhndb_activate_static_region(sc, region, child, r); if (error) device_printf(sc->dev, "static window allocation " "for 0x%llx-0x%llx failed\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (error); } /* A dynamic window will be required; is this resource high enough * priority to be reserved a dynamic window? */ if (dw_priority < sc->bus_res->min_prio) { if (indirect) *indirect = true; return (ENOMEM); } /* Find and retain a usable window */ BHNDB_LOCK(sc); { dwa = bhndb_retain_dynamic_window(sc, r); } BHNDB_UNLOCK(sc); if (dwa == NULL) { if (indirect) *indirect = true; return (ENOMEM); } /* Configure resource with its real bus values. */ parent_offset = dwa->win->win_offset; parent_offset += r_start - dwa->target; error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset, dwa->win->win_size); if (error) goto failed; /* Mark active */ if ((error = rman_activate_resource(r))) goto failed; return (0); failed: /* Release our region allocation. */ BHNDB_LOCK(sc); bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); return (error); } /** * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE(). */ static int -bhndb_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +bhndb_activate_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_softc *sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ - if (bhndb_get_rman(sc, child, type) == NULL) { + if (bhndb_get_rman(sc, child, rman_get_type(r)) == NULL) { return (BUS_ACTIVATE_RESOURCE(device_get_parent(sc->parent_dev), - child, type, rid, r)); + child, r)); } - return (bhndb_try_activate_resource(sc, child, type, rid, r, NULL)); + return (bhndb_try_activate_resource(sc, child, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int -bhndb_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +bhndb_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; - int error; + int error, type; sc = device_get_softc(dev); + type = rman_get_type(r); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_DEACTIVATE_RESOURCE( - device_get_parent(sc->parent_dev), child, type, rid, r)); + device_get_parent(sc->parent_dev), child, r)); } /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); switch (type) { case SYS_RES_IRQ: /* No bridge-level state to be freed */ return (0); case SYS_RES_MEMORY: /* Free any dynamic window allocation. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa != NULL) bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } return (0); default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST().
*/ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_NATIVE children, all resources are activated as direct * resources via BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_BRIDGED children, the resource priority is determined, * and if possible, the resource is activated as a direct resource. For example, * depending on resource priority and bridge resource availability, this * function will attempt to activate SYS_RES_MEMORY resources using either a * static register window, a dynamic register window, or it will configure @p r * as an indirect resource -- in that order. */ static int bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct bhndb_softc *sc; struct bhndb_region *region; bhndb_priority_t r_prio; rman_res_t r_start, r_size; int error; bool indirect; KASSERT(!r->direct, ("direct flag set on inactive resource")); KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE), ("RF_ACTIVE set on inactive resource")); sc = device_get_softc(dev); /* Delegate directly to BUS_ACTIVATE_RESOURCE() if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { - error = BUS_ACTIVATE_RESOURCE(dev, child, type, rid, r->res); + error = BUS_ACTIVATE_RESOURCE(dev, child, r->res); if (error == 0) r->direct = true; return (error); } r_start = rman_get_start(r->res); r_size = rman_get_size(r->res); /* Determine the resource priority of bridged resources, and skip direct * allocation if the priority is too low. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { switch (type) { case SYS_RES_IRQ: /* IRQ resources are always direct */ break; case SYS_RES_MEMORY: region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) r_prio = region->priority; else r_prio = BHNDB_PRIORITY_NONE; /* If less than the minimum dynamic window priority, * this resource should always be indirect. */ if (r_prio < sc->bus_res->min_prio) return (0); break; default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /* Attempt direct activation */ - error = bhndb_try_activate_resource(sc, child, type, rid, r->res, - &indirect); + error = bhndb_try_activate_resource(sc, child, r->res, &indirect); if (!error) { r->direct = true; } else if (indirect) { /* The request was valid, but no viable register window is * available; indirection must be employed. */ error = 0; r->direct = false; } if (BHNDB_DEBUG(PRIO) && bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { device_printf(child, "activated 0x%llx-0x%llx as %s " "resource\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, r->direct ? "direct" : "indirect"); } return (error); } /** * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE(). 
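 *
 * Indirect resources carry no bridge-level activation state and are
 * returned immediately; only direct resources are passed down to
 * BUS_DEACTIVATE_RESOURCE().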
*/ static int bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Indirect resources don't require activation */ if (!r->direct) return (0); KASSERT(rman_get_flags(r->res) & RF_ACTIVE, ("RF_ACTIVE not set on direct resource")); /* Perform deactivation */ - error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r->res); + error = BUS_DEACTIVATE_RESOURCE(dev, child, r->res); if (!error) r->direct = false; return (error); } /** * Find the best available bridge resource allocation record capable of handling * bus I/O requests of @p size at @p addr. * * In order of preference, this function will either: * * - Configure and return a free allocation record * - Return an existing allocation record mapping the requested space, or * - Steal, configure, and return an in-use allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] borrowed Set to true if the allocation record was borrowed to * fulfill this request; the borrowed record maps the target address range, * and must not be modified. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static struct bhndb_dw_alloc * bhndb_io_resource_get_window(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bool *borrowed, bool *stolen, bus_addr_t *restore) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; struct bhndb_region *region; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; *borrowed = false; *stolen = false; /* Try to fetch a free window */ if ((dwa = bhndb_dw_next_free(br)) != NULL) return (dwa); /* Search for an existing dynamic mapping of this address range. * Static regions are not searched, as a statically mapped * region would never be allocated as an indirect resource. */ for (size_t i = 0; i < br->dwa_count; i++) { const struct bhndb_regwin *win; dwa = &br->dw_alloc[i]; win = dwa->win; KASSERT(win->win_type == BHNDB_REGWIN_T_DYN, ("invalid register window type")); /* Verify the range */ if (addr < dwa->target) continue; if (addr + size > dwa->target + win->win_size) continue; /* Found */ *borrowed = true; return (dwa); } /* Try to steal a window; this should only be required on very early * PCI_V0 (BCM4318, etc) Wi-Fi chipsets */ region = bhndb_find_resource_region(br, addr, size); if (region == NULL) return (NULL); if ((region->alloc_flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) == 0) return (NULL); /* Steal a window. This acquires our backing spinlock, disabling * interrupts; the spinlock will be released by * bhndb_dw_return_stolen() */ if ((dwa = bhndb_dw_steal(br, restore)) != NULL) { *stolen = true; return (dwa); } panic("register windows exhausted attempting to map 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } /** * Return a borrowed reference to a bridge resource allocation record capable * of handling bus I/O requests of @p size at @p addr. * * This will either return a reference to an existing allocation record mapping * the requested space, or will configure and return a free allocation record. * * Will panic if a usable record cannot be found. 
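 *
 * Typical usage mirrors the BHNDB_IO_COMMON_SETUP()/TEARDOWN() macros
 * defined below; a condensed sketch (addr, size, and val are placeholders):
 *
 *	BHNDB_LOCK(sc);
 *	dwa = bhndb_io_resource(sc, addr, size, &offset, &stolen, &restore);
 *	bus_write_4(dwa->parent_res, offset, val);
 *	if (stolen)
 *		bhndb_dw_return_stolen(sc->dev, sc->bus_res, dwa, restore);
 *	BHNDB_UNLOCK(sc);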
* * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore) { struct bhndb_dw_alloc *dwa; bool borrowed; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); dwa = bhndb_io_resource_get_window(sc, addr, size, &borrowed, stolen, restore); /* Adjust the window if the I/O request won't fit in the current * target range. */ if (addr < dwa->target || addr > dwa->target + dwa->win->win_size || (dwa->target + dwa->win->win_size) - addr < size) { /* Cannot modify target of borrowed windows */ if (borrowed) { panic("borrowed register window does not map expected " "range 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ bus_addr_t restore; \ bool stolen; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, rman_get_start(r->res) + \ offset, _io_size, &io_offset, &stolen, &restore); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ if (stolen) { \ bhndb_dw_return_stolen(sc->dev, sc->bus_res, \ dwa, restore); \ } \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _name) \ static _type \ bhndb_bus_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _name (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _name) \ static void \ bhndb_bus_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _name (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */ #define BHNDB_IO_MISC(_type, _ptr, _op, _size) \ static void \ bhndb_bus_ ## _op ## _ ## _size (device_t dev, \ device_t child, struct bhnd_resource *r, bus_size_t offset, \ _type _ptr datap, bus_size_t count) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \ bus_ ## _op ## _ ## _size (io_res, io_offset, \ datap, count); \ 
BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a complete set of read/write methods */ #define BHNDB_IO_METHODS(_type, _size) \ BHNDB_IO_READ(_type, _size) \ BHNDB_IO_WRITE(_type, _size) \ \ BHNDB_IO_READ(_type, stream_ ## _size) \ BHNDB_IO_WRITE(_type, stream_ ## _size) \ \ BHNDB_IO_MISC(_type, *, read_multi, _size) \ BHNDB_IO_MISC(_type, *, write_multi, _size) \ \ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \ \ BHNDB_IO_MISC(_type, , set_multi, _size) \ BHNDB_IO_MISC(_type, , set_region, _size) \ BHNDB_IO_MISC(_type, *, read_region, _size) \ BHNDB_IO_MISC(_type, *, write_region, _size) \ \ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \ BHNDB_IO_MISC(_type, *, write_region_stream, _size) BHNDB_IO_METHODS(uint8_t, 1); BHNDB_IO_METHODS(uint16_t, 2); BHNDB_IO_METHODS(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER(). */ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHNDB_IO_COMMON_SETUP(length); bus_barrier(io_res, io_offset + offset, length, flags); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BHND_MAP_INTR(). */ static int bhndb_bhnd_map_intr(device_t dev, device_t child, u_int intr, rman_res_t *irq) { u_int ivec; int error; /* Is the intr valid? */ if (intr >= bhnd_get_intr_count(child)) return (EINVAL); /* Fetch the interrupt vector */ if ((error = bhnd_get_intr_ivec(child, intr, &ivec))) return (error); /* Map directly to the actual backplane interrupt vector */ *irq = ivec; return (0); } /** * Default bhndb(4) implementation of BHND_UNMAP_INTR(). */ static void bhndb_bhnd_unmap_intr(device_t dev, device_t child, rman_res_t irq) { /* No state to clean up */ } /** * Default bhndb(4) implementation of BUS_SETUP_INTR(). */ static int bhndb_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { struct bhndb_softc *sc; struct bhndb_intr_isrc *isrc; struct bhndb_intr_handler *ih; int error; sc = device_get_softc(dev); /* Fetch the isrc */ if ((error = BHNDB_MAP_INTR_ISRC(dev, r, &isrc))) { device_printf(dev, "failed to fetch isrc: %d\n", error); return (error); } /* Allocate new ihandler entry */ ih = bhndb_alloc_intr_handler(child, r, isrc); if (ih == NULL) return (ENOMEM); /* Perform actual interrupt setup via the host isrc */ error = bus_setup_intr(isrc->is_owner, isrc->is_res, flags, filter, handler, arg, &ih->ih_cookiep); if (error) { bhndb_free_intr_handler(ih); return (error); } /* Add to our interrupt handler list */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Provide the interrupt handler entry as our cookiep value */ *cookiep = ih; return (0); } /** * Default bhndb(4) implementation of BUS_TEARDOWN_INTR(). 
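 *
 * The @p cookiep value must be the handler entry returned via the cookie
 * pointer by bhndb_setup_intr(); teardown is performed against the host
 * interrupt source, and the entry is re-registered if teardown fails so
 * that a later teardown attempt remains possible.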
*/ static int bhndb_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookiep) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; int error; sc = device_get_softc(dev); /* Locate and claim ownership of the interrupt handler entry */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookiep); if (ih == NULL) { panic("%s requested teardown of invalid cookiep %p", device_get_nameunit(child), cookiep); } bhndb_deregister_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Perform actual interrupt teardown via the host isrc */ isrc = ih->ih_isrc; error = bus_teardown_intr(isrc->is_owner, isrc->is_res, ih->ih_cookiep); if (error) { /* If teardown fails, we need to reinsert the handler entry * to allow later teardown */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); return (error); } /* Free the entry */ bhndb_free_intr_handler(ih); return (0); } /** * Default bhndb(4) implementation of BUS_BIND_INTR(). */ static int bhndb_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); isrc = NULL; /* Fetch the isrc corresponding to the child IRQ resource */ BHNDB_LOCK(sc); STAILQ_FOREACH(ih, &sc->bus_res->bus_intrs, ih_link) { if (ih->ih_res == irq) { isrc = ih->ih_isrc; break; } } BHNDB_UNLOCK(sc); if (isrc == NULL) { panic("%s requested bind of invalid irq %#jx-%#jx", device_get_nameunit(child), rman_get_start(irq), rman_get_end(irq)); } /* Perform actual bind via the host isrc */ return (bus_bind_intr(isrc->is_owner, isrc->is_res, cpu)); } /** * Default bhndb(4) implementation of BUS_DESCRIBE_INTR(). */ static int bhndb_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); /* Locate the interrupt handler entry; the caller owns the handler * reference, and thus our entry is guaranteed to remain valid after * we drop our lock below. */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookie); if (ih == NULL) { panic("%s requested invalid cookiep %p", device_get_nameunit(child), cookie); } isrc = ih->ih_isrc; BHNDB_UNLOCK(sc); /* Perform the actual request via the host isrc */ return (BUS_DESCRIBE_INTR(device_get_parent(isrc->is_owner), isrc->is_owner, isrc->is_res, ih->ih_cookiep, descr)); } /** * Default bhndb(4) implementation of BUS_CONFIG_INTR(). */ static int bhndb_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BUS_REMAP_INTR(). */ static int bhndb_remap_intr(device_t dev, device_t child, u_int irq) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BHND_BUS_GET_DMA_TRANSLATION(). */ static inline int bhndb_get_dma_translation(device_t dev, device_t child, u_int width, uint32_t flags, bus_dma_tag_t *dmat, struct bhnd_dma_translation *translation) { struct bhndb_softc *sc; const struct bhndb_hwcfg *hwcfg; const struct bhnd_dma_translation *match; bus_dma_tag_t match_dmat; bhnd_addr_t addr_mask, match_addr_mask; sc = device_get_softc(dev); hwcfg = sc->bus_res->cfg; /* Is DMA supported? */ if (sc->bus_res->res->dma_tags == NULL) return (ENODEV); /* Is the requested width supported?
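	 *
	 * Addressing wider than 32 bits requires 64-bit backplane support
	 * (BHND_CAP_BP64); if that capability is absent, the request is
	 * clamped to 32-bit addressing before a translation descriptor is
	 * selected.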
*/ if (width > BHND_DMA_ADDR_32BIT) { /* Backplane must support 64-bit addressing */ if (!(sc->chipid.chip_caps & BHND_CAP_BP64)) width = BHND_DMA_ADDR_32BIT; } /* Find the best matching descriptor for the requested width */ addr_mask = BHND_DMA_ADDR_BITMASK(width); match = NULL; match_addr_mask = 0x0; match_dmat = NULL; for (size_t i = 0; i < sc->bus_res->res->num_dma_tags; i++) { const struct bhnd_dma_translation *dwin; bhnd_addr_t masked; dwin = &hwcfg->dma_translations[i]; /* The base address must be device addressable */ if ((dwin->base_addr & addr_mask) != dwin->base_addr) continue; /* The flags must match */ if ((dwin->flags & flags) != flags) continue; /* The window must cover at least part of our addressable * range */ masked = (dwin->addr_mask | dwin->addrext_mask) & addr_mask; if (masked == 0) continue; /* Is this a better match? */ if (match == NULL || masked > match_addr_mask) { match = dwin; match_addr_mask = masked; match_dmat = sc->bus_res->res->dma_tags[i]; } } if (match == NULL || match_addr_mask == 0) return (ENOENT); if (dmat != NULL) *dmat = match_dmat; if (translation != NULL) *translation = *match; return (0); } /** * Default bhndb(4) implementation of BUS_GET_DMA_TAG(). */ static bus_dma_tag_t bhndb_get_dma_tag(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); /* * A bridge may have multiple DMA translation descriptors, each with * their own incompatible restrictions; drivers should in general call * BHND_BUS_GET_DMA_TRANSLATION() to fetch both the best available DMA * translation, and its corresponding DMA tag. * * Child drivers that do not use BHND_BUS_GET_DMA_TRANSLATION() are * responsible for creating their own restricted DMA tag; since we * cannot do this for them in BUS_GET_DMA_TAG(), we simply return the * bridge parent's DMA tag directly; */ return (bus_get_dma_tag(sc->parent_dev)); } static device_method_t bhndb_methods[] = { /* Device interface */ \ DEVMETHOD(device_probe, bhndb_generic_probe), DEVMETHOD(device_detach, bhndb_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bhndb_generic_suspend), DEVMETHOD(device_resume, bhndb_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhndb_probe_nomatch), DEVMETHOD(bus_print_child, bhndb_print_child), DEVMETHOD(bus_child_location, bhndb_child_location), DEVMETHOD(bus_add_child, bhndb_add_child), DEVMETHOD(bus_child_deleted, bhndb_child_deleted), DEVMETHOD(bus_alloc_resource, bhndb_alloc_resource), DEVMETHOD(bus_release_resource, bhndb_release_resource), DEVMETHOD(bus_activate_resource, bhndb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhndb_deactivate_resource), DEVMETHOD(bus_setup_intr, bhndb_setup_intr), DEVMETHOD(bus_teardown_intr, bhndb_teardown_intr), DEVMETHOD(bus_config_intr, bhndb_config_intr), DEVMETHOD(bus_bind_intr, bhndb_bind_intr), DEVMETHOD(bus_describe_intr, bhndb_describe_intr), DEVMETHOD(bus_remap_intr, bhndb_remap_intr), DEVMETHOD(bus_get_dma_tag, bhndb_get_dma_tag), DEVMETHOD(bus_adjust_resource, bhndb_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource_list, bhndb_get_resource_list), DEVMETHOD(bus_read_ivar, bhndb_read_ivar), DEVMETHOD(bus_write_ivar, bhndb_write_ivar), /* BHNDB interface */ DEVMETHOD(bhndb_get_chipid, bhndb_get_chipid), DEVMETHOD(bhndb_is_core_disabled, bhndb_is_core_disabled), 
DEVMETHOD(bhndb_get_hostb_core, bhndb_get_hostb_core), DEVMETHOD(bhndb_suspend_resource, bhndb_suspend_resource), DEVMETHOD(bhndb_resume_resource, bhndb_resume_resource), /* BHND interface */ DEVMETHOD(bhnd_bus_get_chipid, bhndb_get_chipid), DEVMETHOD(bhnd_bus_activate_resource, bhndb_activate_bhnd_resource), DEVMETHOD(bhnd_bus_deactivate_resource, bhndb_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var), DEVMETHOD(bhnd_bus_map_intr, bhndb_bhnd_map_intr), DEVMETHOD(bhnd_bus_unmap_intr, bhndb_bhnd_unmap_intr), DEVMETHOD(bhnd_bus_get_dma_translation, bhndb_get_dma_translation), DEVMETHOD(bhnd_bus_get_service_registry,bhndb_get_service_registry), DEVMETHOD(bhnd_bus_register_provider, bhnd_bus_generic_sr_register_provider), DEVMETHOD(bhnd_bus_deregister_provider, bhnd_bus_generic_sr_deregister_provider), DEVMETHOD(bhnd_bus_retain_provider, bhnd_bus_generic_sr_retain_provider), DEVMETHOD(bhnd_bus_release_provider, bhnd_bus_generic_sr_release_provider), DEVMETHOD(bhnd_bus_read_1, bhndb_bus_read_1), DEVMETHOD(bhnd_bus_read_2, bhndb_bus_read_2), DEVMETHOD(bhnd_bus_read_4, bhndb_bus_read_4), DEVMETHOD(bhnd_bus_write_1, bhndb_bus_write_1), DEVMETHOD(bhnd_bus_write_2, bhndb_bus_write_2), DEVMETHOD(bhnd_bus_write_4, bhndb_bus_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhndb_bus_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhndb_bus_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhndb_bus_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhndb_bus_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhndb_bus_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhndb_bus_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhndb_bus_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhndb_bus_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhndb_bus_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhndb_bus_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhndb_bus_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhndb_bus_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, bhndb_bus_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhndb_bus_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhndb_bus_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhndb_bus_write_multi_stream_1), DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2), 
DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier), DEVMETHOD_END }; DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc)); MODULE_VERSION(bhndb, 1); MODULE_DEPEND(bhndb, bhnd, 1, 1, 1); diff --git a/sys/dev/bhnd/cores/chipc/chipc.c b/sys/dev/bhnd/cores/chipc/chipc.c index 2d1440e5c987..bdba61a2b942 100644 --- a/sys/dev/bhnd/cores/chipc/chipc.c +++ b/sys/dev/bhnd/cores/chipc/chipc.c @@ -1,1398 +1,1392 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Broadcom ChipCommon driver. * * With the exception of some very early chipsets, the ChipCommon core * has been included in all HND SoCs and chipsets based on the siba(4) * and bcma(4) interconnects, providing a common interface to chipset * identification, bus enumeration, UARTs, clocks, watchdog interrupts, * GPIO, flash, etc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "chipcreg.h" #include "chipcvar.h" #include "chipc_private.h" static struct bhnd_device_quirk chipc_quirks[]; /* Supported device identifiers */ static const struct bhnd_device chipc_devices[] = { BHND_DEVICE(BCM, CC, NULL, chipc_quirks), BHND_DEVICE(BCM, 4706_CC, NULL, chipc_quirks), BHND_DEVICE_END }; /* Device quirks table */ static struct bhnd_device_quirk chipc_quirks[] = { /* HND OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (12), CHIPC_QUIRK_OTP_HND), /* (?) 
*/ BHND_CORE_QUIRK (HWREV_EQ (17), CHIPC_QUIRK_OTP_HND), /* BCM4311 */ BHND_CORE_QUIRK (HWREV_EQ (22), CHIPC_QUIRK_OTP_HND), /* BCM4312 */ /* IPX OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (21), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(23), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(32), CHIPC_QUIRK_SUPPORTS_SPROM), BHND_CORE_QUIRK (HWREV_GTE(35), CHIPC_QUIRK_SUPPORTS_CAP_EXT), BHND_CORE_QUIRK (HWREV_GTE(49), CHIPC_QUIRK_IPX_OTPL_SIZE), /* 4706 variant quirks */ BHND_CORE_QUIRK (HWREV_EQ (38), CHIPC_QUIRK_4706_NFLASH), /* BCM5357? */ BHND_CHIP_QUIRK (4706, HWREV_ANY, CHIPC_QUIRK_4706_NFLASH), /* 4331 quirks */ BHND_CHIP_QUIRK (4331, HWREV_ANY, CHIPC_QUIRK_4331_EXTPA_MUX_SPROM), BHND_PKG_QUIRK (4331, TN, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TNA0, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TT, CHIPC_QUIRK_4331_EXTPA2_MUX_SPROM), /* 4360 quirks */ BHND_CHIP_QUIRK (4352, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43460, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43462, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43602, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_DEVICE_QUIRK_END }; static int chipc_add_children(struct chipc_softc *sc); static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps); static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps); static bool chipc_should_enable_muxed_sprom( struct chipc_softc *sc); static int chipc_enable_otp_power(struct chipc_softc *sc); static void chipc_disable_otp_power(struct chipc_softc *sc); static int chipc_enable_sprom_pins(struct chipc_softc *sc); static void chipc_disable_sprom_pins(struct chipc_softc *sc); static int chipc_try_activate_resource(device_t dev, - device_t child, int type, int rid, - struct resource *r, bool req_direct); + device_t child, struct resource *r, + bool req_direct); static int chipc_init_rman(struct chipc_softc *sc); static void chipc_free_rman(struct chipc_softc *sc); static struct rman *chipc_get_rman(device_t dev, int type, u_int flags); /* quirk and capability flag convenience macros */ #define CHIPC_QUIRK(_sc, _name) \ ((_sc)->quirks & CHIPC_QUIRK_ ## _name) #define CHIPC_CAP(_sc, _name) \ ((_sc)->caps._name) #define CHIPC_ASSERT_QUIRK(_sc, _name) \ KASSERT(CHIPC_QUIRK((_sc), _name), ("quirk " __STRING(_name) " not set")) #define CHIPC_ASSERT_CAP(_sc, _name) \ KASSERT(CHIPC_CAP((_sc), _name), ("capability " __STRING(_name) " not set")) static int chipc_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, chipc_devices, sizeof(chipc_devices[0])); if (id == NULL) return (ENXIO); bhnd_set_default_core_desc(dev); return (BUS_PROBE_DEFAULT); } static int chipc_attach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->quirks = bhnd_device_quirks(dev, chipc_devices, sizeof(chipc_devices[0])); sc->sprom_refcnt = 0; CHIPC_LOCK_INIT(sc); STAILQ_INIT(&sc->mem_regions); /* Set up resource management */ if ((error = chipc_init_rman(sc))) { device_printf(sc->dev, "failed to initialize chipc resource state: %d\n", error); goto failed; } /* Allocate the region containing the chipc register block */ if ((sc->core_region = chipc_find_region_by_rid(sc, 0)) == NULL) { error = ENXIO; goto failed; } error = chipc_retain_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); if (error) { sc->core_region = NULL; goto failed; } /* Save a direct reference to our chipc registers */ sc->core
= sc->core_region->cr_res; /* Fetch and parse capability register(s) */ if ((error = chipc_read_caps(sc, &sc->caps))) goto failed; if (bootverbose) chipc_print_caps(sc->dev, &sc->caps); /* Attach all supported child devices */ if ((error = chipc_add_children(sc))) goto failed; /* * Register ourselves with the bus; we're fully initialized and can * respond to ChipCommon API requests. * * Since our children may need access to ChipCommon, this must be done * before attaching our children below (via bus_generic_attach). */ if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC))) goto failed; if ((error = bus_generic_attach(dev))) goto failed; return (0); failed: device_delete_children(sc->dev); if (sc->core_region != NULL) { chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); } chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (error); } static int chipc_detach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev))) return (error); if ((error = device_delete_children(dev))) return (error); if ((error = bhnd_deregister_provider(dev, BHND_SERVICE_ANY))) return (error); chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (0); } static int chipc_add_children(struct chipc_softc *sc) { device_t child; const char *flash_bus; int error; /* SPROM/OTP */ if (sc->caps.nvram_src == BHND_NVRAM_SRC_SPROM || sc->caps.nvram_src == BHND_NVRAM_SRC_OTP) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", -1); if (child == NULL) { device_printf(sc->dev, "failed to add nvram device\n"); return (ENXIO); } /* Both OTP and external SPROM are mapped at CHIPC_SPROM_OTP */ error = chipc_set_mem_resource(sc, child, 0, CHIPC_SPROM_OTP, CHIPC_SPROM_OTP_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set OTP memory " "resource: %d\n", error); return (error); } } /* * PMU/PWR_CTRL * * On AOB ("Always on Bus") devices, the PMU core (if it exists) is * attached directly to the bhnd(4) bus -- not chipc.
*/ if (sc->caps.pmu && !sc->caps.aob) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pmu\n"); return (ENXIO); } } else if (sc->caps.pwr_ctrl) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pwrctl\n"); return (ENXIO); } } /* GPIO */ child = BUS_ADD_CHILD(sc->dev, 0, "gpio", -1); if (child == NULL) { device_printf(sc->dev, "failed to add gpio\n"); return (ENXIO); } error = chipc_set_mem_resource(sc, child, 0, 0, RM_MAX_END, 0, 0); if (error) { device_printf(sc->dev, "failed to set gpio memory resource: " "%d\n", error); return (error); } /* All remaining devices are SoC-only */ if (bhnd_get_attach_type(sc->dev) != BHND_ATTACH_NATIVE) return (0); /* UARTs */ for (u_int i = 0; i < min(sc->caps.num_uarts, CHIPC_UART_MAX); i++) { int irq_rid, mem_rid; irq_rid = 0; mem_rid = 0; child = BUS_ADD_CHILD(sc->dev, 0, "uart", -1); if (child == NULL) { device_printf(sc->dev, "failed to add uart%u\n", i); return (ENXIO); } /* Shared IRQ */ error = chipc_set_irq_resource(sc, child, irq_rid, 0); if (error) { device_printf(sc->dev, "failed to set uart%u irq %u\n", i, 0); return (error); } /* UART registers are mapped sequentially */ error = chipc_set_mem_resource(sc, child, mem_rid, CHIPC_UART(i), CHIPC_UART_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set uart%u memory " "resource: %d\n", i, error); return (error); } } /* Flash */ flash_bus = chipc_flash_bus_name(sc->caps.flash_type); if (flash_bus != NULL) { int rid; child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, -1); if (child == NULL) { device_printf(sc->dev, "failed to add %s device\n", flash_bus); return (ENXIO); } /* flash memory mapping */ rid = 0; error = chipc_set_mem_resource(sc, child, rid, 0, RM_MAX_END, 1, 1); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } /* flashctrl registers */ rid++; error = chipc_set_mem_resource(sc, child, rid, CHIPC_SFLASH_BASE, CHIPC_SFLASH_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } } return (0); } /** * Determine the NVRAM data source for this device. * * The SPROM, OTP, and flash capability flags must be fully populated in * @p caps. * * @param sc chipc driver state. * @param caps capability flags to be used to derive NVRAM configuration. */ static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t otp_st, srom_ctrl; /* * We check for hardware presence in order of precedence. For example, * SPROM is always used in preference to internal OTP if found. 
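 * The resulting order of precedence, as implemented below: 1) an
 * external SPROM (if CHIPC_SRC_PRESENT is set), 2) a programmed OTP
 * hardware subregion, 3) flash, and otherwise BHND_NVRAM_SRC_UNKNOWN.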
*/ if (CHIPC_QUIRK(sc, SUPPORTS_SPROM) && caps->sprom) { srom_ctrl = bhnd_bus_read_4(sc->core, CHIPC_SPROM_CTRL); if (srom_ctrl & CHIPC_SRC_PRESENT) return (BHND_NVRAM_SRC_SPROM); } /* Check for programmed OTP H/W subregion (contains SROM data) */ if (CHIPC_QUIRK(sc, SUPPORTS_OTP) && caps->otp_size > 0) { /* TODO: need access to HND-OTP device */ if (!CHIPC_QUIRK(sc, OTP_HND)) { device_printf(sc->dev, "NVRAM unavailable: unsupported OTP controller.\n"); return (BHND_NVRAM_SRC_UNKNOWN); } otp_st = bhnd_bus_read_4(sc->core, CHIPC_OTPST); if (otp_st & CHIPC_OTPS_GUP_HW) return (BHND_NVRAM_SRC_OTP); } /* Check for flash */ if (caps->flash_type != CHIPC_FLASH_NONE) return (BHND_NVRAM_SRC_FLASH); /* No NVRAM hardware capability declared */ return (BHND_NVRAM_SRC_UNKNOWN); } /* Read and parse chipc capabilities */ static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t cap_reg; uint32_t cap_ext_reg; uint32_t regval; /* Fetch cap registers */ cap_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES); cap_ext_reg = 0; if (CHIPC_QUIRK(sc, SUPPORTS_CAP_EXT)) cap_ext_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES_EXT); /* Extract values */ caps->num_uarts = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_NUM_UART); caps->mipseb = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_MIPSEB); caps->uart_gpio = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_UARTGPIO); caps->uart_clock = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_UCLKSEL); caps->extbus_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_EXTBUS); caps->pwr_ctrl = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PWR_CTL); caps->jtag_master = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_JTAGP); caps->pll_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_PLL); caps->backplane_64 = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_BKPLN64); caps->boot_rom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ROM); caps->pmu = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PMU); caps->eci = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ECI); caps->sprom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_SPROM); caps->otp_size = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_OTP_SIZE); caps->seci = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_SECI); caps->gsio = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_GSIO); caps->aob = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_AOB); /* Fetch OTP size for later IPX controller revisions */ if (CHIPC_QUIRK(sc, IPX_OTPL_SIZE)) { regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->otp_size = CHIPC_GET_BITS(regval, CHIPC_OTPL_SIZE); } /* Determine flash type and parameters */ caps->cfi_width = 0; switch (CHIPC_GET_BITS(cap_reg, CHIPC_CAP_FLASH)) { case CHIPC_CAP_SFLASH_ST: caps->flash_type = CHIPC_SFLASH_ST; break; case CHIPC_CAP_SFLASH_AT: caps->flash_type = CHIPC_SFLASH_AT; break; case CHIPC_CAP_NFLASH: /* unimplemented */ caps->flash_type = CHIPC_NFLASH; break; case CHIPC_CAP_PFLASH: caps->flash_type = CHIPC_PFLASH_CFI; /* determine cfi width */ regval = bhnd_bus_read_4(sc->core, CHIPC_FLASH_CFG); if (CHIPC_GET_FLAG(regval, CHIPC_FLASH_CFG_DS)) caps->cfi_width = 2; else caps->cfi_width = 1; break; case CHIPC_CAP_FLASH_NONE: caps->flash_type = CHIPC_FLASH_NONE; break; } /* Handle 4706_NFLASH fallback */ if (CHIPC_QUIRK(sc, 4706_NFLASH) && CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_4706_NFLASH)) { caps->flash_type = CHIPC_NFLASH_4706; } /* Determine NVRAM source. Must occur after the SPROM/OTP/flash * capability flags have been populated. */ caps->nvram_src = chipc_find_nvram_src(sc, caps); /* Determine the SPROM offset within OTP (if any). SPROM-formatted * data is placed within the OTP general use region. 
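 * For example (hypothetical value): an OTP layout GUP field of 2048
 * bits would yield a SPROM offset of 2048 / 8 = 256 bytes into the OTP
 * region, matching the divide-by-8 conversion below.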
*/ caps->sprom_offset = 0; if (caps->nvram_src == BHND_NVRAM_SRC_OTP) { CHIPC_ASSERT_QUIRK(sc, OTP_IPX); /* Bit offset to GUP HW subregion containing SPROM data */ regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->sprom_offset = CHIPC_GET_BITS(regval, CHIPC_OTPL_GUP); /* Convert to bytes */ caps->sprom_offset /= 8; } return (0); } static int chipc_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int chipc_resume(device_t dev) { return (bus_generic_resume(dev)); } static void chipc_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> at", name); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } printf(" (no driver attached)\n"); } static int chipc_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static device_t chipc_add_child(device_t dev, u_int order, const char *name, int unit) { struct chipc_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct chipc_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&dinfo->resources); dinfo->irq_mapped = false; device_set_ivars(child, dinfo); return (child); } static void chipc_child_deleted(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { /* Free the child's resource list */ resource_list_free(&dinfo->resources); /* Unmap the child's IRQ */ if (dinfo->irq_mapped) { bhnd_unmap_intr(dev, dinfo->irq); dinfo->irq_mapped = false; } free(dinfo, M_BHND); } device_set_ivars(child, NULL); } static struct resource_list * chipc_get_resource_list(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* Allocate region records for the given port, and add the port's memory * range to the mem_rman */ static int chipc_rman_init_regions (struct chipc_softc *sc, bhnd_port_type type, u_int port) { struct chipc_region *cr; rman_res_t start, end; u_int num_regions; int error; num_regions = bhnd_get_region_count(sc->dev, type, port); for (u_int region = 0; region < num_regions; region++) { /* Allocate new region record */ cr = chipc_alloc_region(sc, type, port, region); if (cr == NULL) return (ENODEV); /* Can't manage regions that cannot be allocated */ if (cr->cr_rid < 0) { BHND_DEBUG_DEV(sc->dev, "no rid for chipc region " "%s%u.%u", bhnd_port_type_name(type), port, region); chipc_free_region(sc, cr); continue; } /* Add to rman's managed range */ start = cr->cr_addr; end = cr->cr_end; if ((error = rman_manage_region(&sc->mem_rman, start, end))) { chipc_free_region(sc, cr); return (error); } /* Add to region list */ STAILQ_INSERT_TAIL(&sc->mem_regions, cr, cr_link); } return (0); } /* Initialize memory state for all chipc port regions */ static int chipc_init_rman(struct chipc_softc *sc) { u_int num_ports; int error; 
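/*
 * The rman lifecycle here follows the usual pattern: rman_init() below,
 * one rman_manage_region() call per mappable port region (performed in
 * chipc_rman_init_regions() above), reservations carved out of those
 * regions at allocation time, and rman_fini() in chipc_free_rman().
 */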
/* Port types for which we'll register chipc_region mappings */ bhnd_port_type types[] = { BHND_PORT_DEVICE }; /* Initialize resource manager */ sc->mem_rman.rm_start = 0; sc->mem_rman.rm_end = BUS_SPACE_MAXADDR; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "ChipCommon Device Memory"; if ((error = rman_init(&sc->mem_rman))) { device_printf(sc->dev, "could not initialize mem_rman: %d\n", error); return (error); } /* Populate per-port-region state */ for (u_int i = 0; i < nitems(types); i++) { num_ports = bhnd_get_port_count(sc->dev, types[i]); for (u_int port = 0; port < num_ports; port++) { error = chipc_rman_init_regions(sc, types[i], port); if (error) { device_printf(sc->dev, "region init failed for %s%u: %d\n", bhnd_port_type_name(types[i]), port, error); goto failed; } } } return (0); failed: chipc_free_rman(sc); return (error); } /* Free memory management state */ static void chipc_free_rman(struct chipc_softc *sc) { struct chipc_region *cr, *cr_next; STAILQ_FOREACH_SAFE(cr, &sc->mem_regions, cr_link, cr_next) chipc_free_region(sc, cr); rman_fini(&sc->mem_rman); } /** * Return the rman instance for a given resource @p type, if any. * * @param dev The chipc device. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) * @param flags Resource flags (e.g. RF_PREFETCHABLE) */ static struct rman * chipc_get_rman(device_t dev, int type, u_int flags) { struct chipc_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); case SYS_RES_IRQ: /* We delegate IRQ resource management to the parent bus */ return (NULL); default: return (NULL); } } static struct resource * chipc_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct chipc_softc *sc; struct chipc_region *cr; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager, delegate request if necessary */ rm = chipc_get_rman(dev, type, flags); if (rm == NULL) { /* Requested resource type is delegated to our parent */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy " "[%d]\n", *rid, type, device_get_nameunit(child), rman_get_flags(rle->res)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Locate a mapping region */ if ((cr = chipc_find_region(sc, start, end)) == NULL) { /* Resource requests outside our shared port regions can be * delegated to our parent. */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* * As a special case, children that map the complete ChipCommon register * block are delegated to our parent. * * The rman API does not support sharing resources that are not * identical in size; since we allocate subregions to various children, * any children that need to map the entire register block (e.g.
because * they require access to discontiguous register ranges) must make the * allocation through our parent, where we hold a compatible * RF_SHAREABLE allocation. */ if (cr == sc->core_region && cr->cr_addr == start && cr->cr_end == end && cr->cr_count == count) { rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Try to retain a region reference */ if ((error = chipc_retain_region(sc, cr, RF_ALLOCATED))) return (NULL); /* Make our rman reservation */ rv = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (rv == NULL) { chipc_release_region(sc, cr, RF_ALLOCATED); return (NULL); } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } static int chipc_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; struct resource_list_entry *rle; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(dev, type, rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_rl_release_resource(dev, child, type, rid, r)); } /* Locate the mapping region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Deactivate resources */ error = bus_generic_rman_release_resource(dev, child, type, rid, r); if (error != 0) return (error); /* Drop allocation reference */ chipc_release_region(sc, cr, RF_ALLOCATED); /* Clear reference from the resource list entry if exists */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; return (0); } static int chipc_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_adjust_resource(dev, child, r, start, end)); } /* The range is limited to the existing region mapping */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); if (end <= start) return (EINVAL); if (start < cr->cr_addr || end > cr->cr_end) return (EINVAL); /* Range falls within the existing region */ return (rman_adjust_resource(r, start, end)); } /** * Retain an RF_ACTIVE reference to the region mapping @p r, and * configure @p r with its subregion values. * * @param sc Driver instance state. * @param child Requesting child device. - * @param type resource type of @p r. - * @param rid resource id of @p r * @param r resource to be activated. * @param req_direct If true, failure to allocate a direct bhnd resource * will be treated as an error. If false, the resource will not be marked * as RF_ACTIVE if bhnd direct resource allocation fails. 
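 *
 * For example, chipc_activate_resource() below passes req_direct=true,
 * so activation fails with ENOMEM unless the backing chipc region is
 * directly mapped, while chipc_activate_bhnd_resource() passes false
 * and reports an indirect resource instead.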
*/ static int -chipc_try_activate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r, bool req_direct) +chipc_try_activate_resource(device_t dev, device_t child, + struct resource *r, bool req_direct) { struct chipc_softc *sc = device_get_softc(dev); struct rman *rm; struct chipc_region *cr; bhnd_size_t cr_offset; rman_res_t r_start, r_end, r_size; int error; - rm = chipc_get_rman(dev, type, rman_get_flags(r)); + rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) return (EINVAL); r_start = rman_get_start(r); r_end = rman_get_end(r); r_size = rman_get_size(r); /* Find the corresponding chipc region */ cr = chipc_find_region(sc, r_start, r_end); if (cr == NULL) return (EINVAL); /* Calculate subregion offset within the chipc region */ cr_offset = r_start - cr->cr_addr; /* Retain (and activate, if necessary) the chipc region */ if ((error = chipc_retain_region(sc, cr, RF_ACTIVE))) return (error); /* Configure child resource with its subregion values. */ if (cr->cr_res->direct) { error = chipc_init_child_resource(r, cr->cr_res->res, cr_offset, r_size); if (error) goto cleanup; /* Mark active */ if ((error = rman_activate_resource(r))) goto cleanup; } else if (req_direct) { error = ENOMEM; goto cleanup; } return (0); cleanup: chipc_release_region(sc, cr, RF_ACTIVE); return (error); } static int chipc_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct rman *rm; int error; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, type, rman_get_flags(r->res)); if (rm == NULL || !rman_is_region_manager(r->res, rm)) { return (bhnd_bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region resource */ - error = chipc_try_activate_resource(dev, child, type, rid, r->res, - false); + error = chipc_try_activate_resource(dev, child, r->res, false); if (error) return (error); /* Mark the child resource as direct according to the returned resource * state */ if (rman_get_flags(r->res) & RF_ACTIVE) r->direct = true; return (0); } static int -chipc_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +chipc_activate_resource(device_t dev, device_t child, struct resource *r) { struct rman *rm; /* Delegate non-locally managed resources to parent */ - rm = chipc_get_rman(dev, type, rman_get_flags(r)); + rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { - return (bus_generic_activate_resource(dev, child, type, rid, - r)); + return (bus_generic_activate_resource(dev, child, r)); } /* Try activating the chipc region-based resource */ - return (chipc_try_activate_resource(dev, child, type, rid, r, true)); + return (chipc_try_activate_resource(dev, child, r, true)); } /** * The chipc(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int -chipc_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +chipc_deactivate_resource(device_t dev, device_t child, + struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; int error; sc = device_get_softc(dev); /* Handled by parent bus?
*/ - rm = chipc_get_rman(dev, type, rman_get_flags(r)); + rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { - return (bus_generic_deactivate_resource(dev, child, type, rid, - r)); + return (bus_generic_deactivate_resource(dev, child, r)); } /* Find the corresponding chipc region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Drop associated RF_ACTIVE reference */ chipc_release_region(sc, cr, RF_ACTIVE); return (0); } /** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); bus_topo_lock(); parent = device_get_parent(sc->dev); hostb = bhnd_bus_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { bus_topo_unlock(); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); bus_topo_unlock(); return (result); } static int chipc_enable_sprom(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Already enabled? */ if (sc->sprom_refcnt >= 1) { sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (0); } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: error = chipc_enable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: error = chipc_enable_otp_power(sc); break; default: error = 0; break; } /* Bump the reference count */ if (error == 0) sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (error); } static void chipc_disable_sprom(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Check reference count, skip disable if in-use. */ KASSERT(sc->sprom_refcnt > 0, ("sprom refcnt overrelease")); sc->sprom_refcnt--; if (sc->sprom_refcnt > 0) { CHIPC_UNLOCK(sc); return; } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: chipc_disable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: chipc_disable_otp_power(sc); break; default: break; } CHIPC_UNLOCK(sc); } static int chipc_enable_otp_power(struct chipc_softc *sc) { // TODO: Enable OTP resource via PMU, and wait up to 100 usec for // OTPS_READY to be set in `optstatus`. return (0); } static void chipc_disable_otp_power(struct chipc_softc *sc) { // TODO: Disable OTP resource via PMU } /** * If required by this device, enable access to the SPROM. * * @param sc chipc driver state. */ static int chipc_enable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins already enabled")); /* Nothing to do? 
*/ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (0); /* Check whether bus is busy */ if (!chipc_should_enable_muxed_sprom(sc)) return (EBUSY); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return (0); } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } /* Refuse to proceed on unsupported devices with muxed SPROM pins */ device_printf(sc->dev, "muxed sprom lines on unrecognized device\n"); return (ENXIO); } /** * If required by this device, revert any GPIO/pin configuration applied * to allow SPROM access. * * @param sc chipc driver state. */ static void chipc_disable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins in use")); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl |= CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return; } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } } static uint32_t chipc_read_chipst(device_t dev) { struct chipc_softc *sc = device_get_softc(dev); return (bhnd_bus_read_4(sc->core, CHIPC_CHIPST)); } static void chipc_write_chipctrl(device_t dev, uint32_t value, uint32_t mask) { struct chipc_softc *sc; uint32_t cctrl; sc = device_get_softc(dev); CHIPC_LOCK(sc); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); cctrl = (cctrl & ~mask) | (value & mask); bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); CHIPC_UNLOCK(sc); } static struct chipc_caps * chipc_get_caps(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); return (&sc->caps); } static device_method_t chipc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, chipc_probe), DEVMETHOD(device_attach, chipc_attach), DEVMETHOD(device_detach, chipc_detach), DEVMETHOD(device_suspend, chipc_suspend), DEVMETHOD(device_resume, chipc_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, chipc_probe_nomatch), DEVMETHOD(bus_print_child, chipc_print_child), DEVMETHOD(bus_add_child, chipc_add_child), DEVMETHOD(bus_child_deleted, chipc_child_deleted), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_alloc_resource, chipc_alloc_resource), DEVMETHOD(bus_release_resource, chipc_release_resource), DEVMETHOD(bus_adjust_resource, chipc_adjust_resource), DEVMETHOD(bus_activate_resource, chipc_activate_resource), DEVMETHOD(bus_deactivate_resource, chipc_deactivate_resource), DEVMETHOD(bus_get_resource_list, chipc_get_resource_list), DEVMETHOD(bus_get_rman, chipc_get_rman), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_bind_intr, bus_generic_bind_intr), DEVMETHOD(bus_describe_intr, bus_generic_describe_intr), /* BHND bus interface */
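/*
 * A note on chipc_write_chipctrl() above: the write is a masked
 * read-modify-write, so only the bits selected by `mask' change. A
 * hypothetical caller setting bit 0x2 and clearing bit 0x4 in a single
 * call via the bhnd_chipc interface method (assumed macro name):
 *
 *	BHND_CHIPC_WRITE_CHIPCTRL(chipc_dev, 0x2, 0x2 | 0x4);
 *
 * With cctrl reading 0x5 on entry, the register becomes
 * (0x5 & ~0x6) | (0x2 & 0x6) == 0x3.
 */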
DEVMETHOD(bhnd_bus_activate_resource, chipc_activate_bhnd_resource), /* ChipCommon interface */ DEVMETHOD(bhnd_chipc_read_chipst, chipc_read_chipst), DEVMETHOD(bhnd_chipc_write_chipctrl, chipc_write_chipctrl), DEVMETHOD(bhnd_chipc_enable_sprom, chipc_enable_sprom), DEVMETHOD(bhnd_chipc_disable_sprom, chipc_disable_sprom), DEVMETHOD(bhnd_chipc_get_caps, chipc_get_caps), DEVMETHOD_END }; DEFINE_CLASS_0(bhnd_chipc, bhnd_chipc_driver, chipc_methods, sizeof(struct chipc_softc)); EARLY_DRIVER_MODULE(bhnd_chipc, bhnd, bhnd_chipc_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_DEPEND(bhnd_chipc, bhnd, 1, 1, 1); MODULE_VERSION(bhnd_chipc, 1); diff --git a/sys/dev/bhnd/cores/usb/bhnd_usb.c b/sys/dev/bhnd/cores/usb/bhnd_usb.c index 68701df14c50..fa9e6d7ec31a 100644 --- a/sys/dev/bhnd/cores/usb/bhnd_usb.c +++ b/sys/dev/bhnd/cores/usb/bhnd_usb.c @@ -1,555 +1,551 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2010, Aleksandr Rybalko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include /* * Ported version of BroadCom USB core driver from ZRouter project */ #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_usbvar.h" /****************************** Variables ************************************/ static const struct bhnd_device bhnd_usb_devs[] = { BHND_DEVICE(BCM, USB20H, "USB2.0 Host core", NULL), BHND_DEVICE_END }; /****************************** Prototypes ***********************************/ static int bhnd_usb_attach(device_t); static int bhnd_usb_probe(device_t); static device_t bhnd_usb_add_child(device_t dev, u_int order, const char *name, int unit); static int bhnd_usb_print_all_resources(device_t dev); static int bhnd_usb_print_child(device_t bus, device_t child); static struct resource * bhnd_usb_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int bhnd_usb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); static struct resource_list * bhnd_usb_get_reslist(device_t dev, device_t child); static int bhnd_usb_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, bhnd_usb_devs, sizeof(bhnd_usb_devs[0])); if (id == NULL) return (ENXIO); device_set_desc(dev, id->desc); return (BUS_PROBE_DEFAULT); } static int bhnd_usb_attach(device_t dev) { struct bhnd_usb_softc *sc; int rid; uint32_t tmp; int tries, err; sc = device_get_softc(dev); bhnd_reset_hw(dev, 0, 0); /* * Allocate the resources which the parent bus has already * determined for us. * XXX: There are few windows (usually 2), RID should be chip-specific */ rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { BHND_ERROR_DEV(dev, "unable to allocate memory"); return (ENXIO); } sc->sc_bt = rman_get_bustag(sc->sc_mem); sc->sc_bh = rman_get_bushandle(sc->sc_mem); sc->sc_maddr = rman_get_start(sc->sc_mem); sc->sc_msize = rman_get_size(sc->sc_mem); rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sc_irq == NULL) { BHND_ERROR_DEV(dev, "unable to allocate IRQ"); return (ENXIO); } sc->sc_irqn = rman_get_start(sc->sc_irq); sc->mem_rman.rm_start = sc->sc_maddr; sc->mem_rman.rm_end = sc->sc_maddr + sc->sc_msize - 1; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "BHND USB core I/O memory addresses"; if (rman_init(&sc->mem_rman) != 0 || rman_manage_region(&sc->mem_rman, sc->mem_rman.rm_start, sc->mem_rman.rm_end) != 0) { panic("%s: sc->mem_rman", __func__); } /* TODO: macros for registers */ bus_write_4(sc->sc_mem, 0x200, 0x7ff); DELAY(100); #define OHCI_CONTROL 0x04 bus_write_4(sc->sc_mem, OHCI_CONTROL, 0); if ( bhnd_get_device(dev) == BHND_COREID_USB20H) { uint32_t rev = bhnd_get_hwrev(dev); BHND_INFO_DEV(dev, "USB HOST 2.0 setup for rev %d", rev); if (rev == 1/* ? 
== 2 */) { /* SiBa code */ /* Change Flush control reg */ tmp = bus_read_4(sc->sc_mem, 0x400) & ~0x8; bus_write_4(sc->sc_mem, 0x400, tmp); tmp = bus_read_4(sc->sc_mem, 0x400); BHND_DEBUG_DEV(dev, "USB20H fcr: 0x%x", tmp); /* Change Shim control reg */ tmp = bus_read_4(sc->sc_mem, 0x304) & ~0x100; bus_write_4(sc->sc_mem, 0x304, tmp); tmp = bus_read_4(sc->sc_mem, 0x304); BHND_DEBUG_DEV(dev, "USB20H shim: 0x%x", tmp); } else if (rev >= 5) { /* BCMA code */ err = bhnd_alloc_pmu(dev); if (err) { BHND_ERROR_DEV(dev, "can't alloc pmu: %d", err); return (err); } err = bhnd_request_ext_rsrc(dev, 1); if (err) { BHND_ERROR_DEV(dev, "can't req ext: %d", err); return (err); } /* Take out of resets */ bus_write_4(sc->sc_mem, 0x200, 0x4ff); DELAY(25); bus_write_4(sc->sc_mem, 0x200, 0x6ff); DELAY(25); /* Make sure digital and AFE are locked in USB PHY */ bus_write_4(sc->sc_mem, 0x524, 0x6b); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0xab); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0x2b); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0x10ab); DELAY(50); bus_read_4(sc->sc_mem, 0x524); tries = 10000; for (;;) { DELAY(10); tmp = bus_read_4(sc->sc_mem, 0x528); if (tmp & 0xc000) break; if (--tries != 0) continue; tmp = bus_read_4(sc->sc_mem, 0x528); BHND_ERROR_DEV(dev, "USB20H mdio_rddata 0x%08x", tmp); } /* XXX: Puzzle code */ bus_write_4(sc->sc_mem, 0x528, 0x80000000); bus_read_4(sc->sc_mem, 0x314); DELAY(265); bus_write_4(sc->sc_mem, 0x200, 0x7ff); DELAY(10); /* Take USB and HSIC out of non-driving modes */ bus_write_4(sc->sc_mem, 0x510, 0); } } bus_generic_probe(dev); if (bhnd_get_device(dev) == BHND_COREID_USB20H && (bhnd_get_hwrev(dev) > 0)) bhnd_usb_add_child(dev, 0, "ehci", -1); bhnd_usb_add_child(dev, 1, "ohci", -1); bus_generic_attach(dev); return (0); } static struct rman * bhnd_usb_get_rman(device_t bus, int type, u_int flags) { struct bhnd_usb_softc *sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); default: return (NULL); } } static struct resource * bhnd_usb_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *rv; struct resource_list *rl; struct resource_list_entry *rle; int passthrough, isdefault; isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); rle = NULL; if (!passthrough && isdefault) { BHND_INFO_DEV(bus, "trying to allocate def %d - %d for %s", type, *rid, device_get_nameunit(child)); rl = BUS_GET_RESOURCE_LIST(bus, child); rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) panic("%s: resource entry is busy", __func__); start = rle->start; end = rle->end; count = rle->count; } else { BHND_INFO_DEV(bus, "trying to allocate %d - %d (%jx-%jx) for %s", type, *rid, start, end, device_get_nameunit(child)); } /* * If the request is for a resource which we manage, * attempt to satisfy the allocation ourselves. */ if (type == SYS_RES_MEMORY) { rv = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags); if (rv == NULL) { BHND_ERROR_DEV(bus, "could not allocate resource"); return (NULL); } return (rv); } /* * Pass the request to the parent.
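 * Non-memory types (e.g. SYS_RES_IRQ) fall through to this path; the
 * request is resolved against the child's resource list and then
 * forwarded up the tree by bus_generic_rl_alloc_resource().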
*/ return (bus_generic_rl_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static struct resource_list * bhnd_usb_get_reslist(device_t dev, device_t child) { struct bhnd_usb_devinfo *sdi; sdi = device_get_ivars(child); return (&sdi->sdi_rl); } static int bhnd_usb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhnd_usb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ if (type != SYS_RES_MEMORY) { return (bus_generic_rl_release_resource(dev, child, type, rid, r)); } error = bus_generic_rman_release_resource(dev, child, type, rid, r); if (error != 0) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; } return (0); } static int -bhnd_usb_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +bhnd_usb_activate_resource(device_t dev, device_t child, struct resource *r) { - if (type != SYS_RES_MEMORY) - return (bus_generic_activate_resource(dev, child, type, rid, - r)); - return (bus_generic_rman_activate_resource(dev, child, type, rid, r)); + if (rman_get_type(r) != SYS_RES_MEMORY) + return (bus_generic_activate_resource(dev, child, r)); + return (bus_generic_rman_activate_resource(dev, child, r)); } static int -bhnd_usb_deactivate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +bhnd_usb_deactivate_resource(device_t dev, device_t child, struct resource *r) { - if (type != SYS_RES_MEMORY) - return (bus_generic_deactivate_resource(dev, child, type, rid, - r)); - return (bus_generic_rman_deactivate_resource(dev, child, type, rid, r)); + if (rman_get_type(r) != SYS_RES_MEMORY) + return (bus_generic_deactivate_resource(dev, child, r)); + return (bus_generic_rman_deactivate_resource(dev, child, r)); } static int bhnd_usb_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct bhnd_usb_softc *sc = device_get_softc(dev); struct resource_map_request args; rman_res_t length, start; int error; if (type != SYS_RES_MEMORY) return (bus_generic_map_resource(dev, child, type, r, argsp, map)); /* Resources must be active to be mapped.
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); args.offset = start - rman_get_start(sc->sc_mem); args.length = length; return (bus_generic_map_resource(dev, child, type, sc->sc_mem, &args, map)); } static int bhnd_usb_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { struct bhnd_usb_softc *sc = device_get_softc(dev); if (type == SYS_RES_MEMORY) r = sc->sc_mem; return (bus_generic_unmap_resource(dev, child, type, r, map)); } static int bhnd_usb_print_all_resources(device_t dev) { struct bhnd_usb_devinfo *sdi; struct resource_list *rl; int retval; retval = 0; sdi = device_get_ivars(dev); rl = &sdi->sdi_rl; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); return (retval); } static int bhnd_usb_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += bhnd_usb_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on %s\n", device_get_nameunit(bus)); return (retval); } static device_t bhnd_usb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhnd_usb_softc *sc; struct bhnd_usb_devinfo *sdi; device_t child; int error; sc = device_get_softc(dev); sdi = malloc(sizeof(struct bhnd_usb_devinfo), M_DEVBUF, M_NOWAIT|M_ZERO); if (sdi == NULL) return (NULL); resource_list_init(&sdi->sdi_rl); sdi->sdi_irq_mapped = false; if (strncmp(name, "ohci", 4) == 0) { sdi->sdi_maddr = sc->sc_maddr + 0x000; sdi->sdi_msize = 0x200; } else if (strncmp(name, "ehci", 4) == 0) { sdi->sdi_maddr = sc->sc_maddr + 0x000; sdi->sdi_msize = 0x1000; } else { panic("Unknown subdevice"); } /* Map the child's IRQ */ if ((error = bhnd_map_intr(dev, 0, &sdi->sdi_irq))) { BHND_ERROR_DEV(dev, "could not map %s interrupt: %d", name, error); goto failed; } sdi->sdi_irq_mapped = true; BHND_INFO_DEV(dev, "%s: irq=%ju maddr=0x%jx", name, sdi->sdi_irq, sdi->sdi_maddr); /* * Add memory window and irq to child's resource list. 
*/ resource_list_add(&sdi->sdi_rl, SYS_RES_MEMORY, 0, sdi->sdi_maddr, sdi->sdi_maddr + sdi->sdi_msize - 1, sdi->sdi_msize); resource_list_add(&sdi->sdi_rl, SYS_RES_IRQ, 0, sdi->sdi_irq, sdi->sdi_irq, 1); child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) { BHND_ERROR_DEV(dev, "could not add %s", name); goto failed; } device_set_ivars(child, sdi); return (child); failed: if (sdi->sdi_irq_mapped) bhnd_unmap_intr(dev, sdi->sdi_irq); resource_list_free(&sdi->sdi_rl); free(sdi, M_DEVBUF); return (NULL); } static void bhnd_usb_child_deleted(device_t dev, device_t child) { struct bhnd_usb_devinfo *dinfo; if ((dinfo = device_get_ivars(child)) == NULL) return; if (dinfo->sdi_irq_mapped) bhnd_unmap_intr(dev, dinfo->sdi_irq); resource_list_free(&dinfo->sdi_rl); free(dinfo, M_DEVBUF); } static device_method_t bhnd_usb_methods[] = { /* Device interface */ DEVMETHOD(device_attach, bhnd_usb_attach), DEVMETHOD(device_probe, bhnd_usb_probe), /* Bus interface */ DEVMETHOD(bus_add_child, bhnd_usb_add_child), DEVMETHOD(bus_child_deleted, bhnd_usb_child_deleted), DEVMETHOD(bus_alloc_resource, bhnd_usb_alloc_resource), DEVMETHOD(bus_get_resource_list, bhnd_usb_get_reslist), DEVMETHOD(bus_get_rman, bhnd_usb_get_rman), DEVMETHOD(bus_print_child, bhnd_usb_print_child), DEVMETHOD(bus_release_resource, bhnd_usb_release_resource), DEVMETHOD(bus_activate_resource, bhnd_usb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhnd_usb_deactivate_resource), DEVMETHOD(bus_map_resource, bhnd_usb_map_resource), DEVMETHOD(bus_unmap_resource, bhnd_usb_unmap_resource), /* Bus interface: generic part */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; DEFINE_CLASS_0(bhnd_usb, bhnd_usb_driver, bhnd_usb_methods, sizeof(struct bhnd_usb_softc)); DRIVER_MODULE(bhnd_usb, bhnd, bhnd_usb_driver, 0, 0); MODULE_VERSION(bhnd_usb, 1); diff --git a/sys/dev/dpaa/fman.c b/sys/dev/dpaa/fman.c index 7c8122a03ce7..2364df0be801 100644 --- a/sys/dev/dpaa/fman.c +++ b/sys/dev/dpaa/fman.c @@ -1,555 +1,554 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include "opt_platform.h" #include #include #include #include #include "fman.h" static MALLOC_DEFINE(M_FMAN, "fman", "fman devices information"); /** * @group FMan private defines. * @{ */ enum fman_irq_enum { FMAN_IRQ_NUM = 0, FMAN_ERR_IRQ_NUM = 1 }; enum fman_mu_ram_map { FMAN_MURAM_OFF = 0x0, FMAN_MURAM_SIZE = 0x28000 }; struct fman_config { device_t fman_device; uintptr_t mem_base_addr; uintptr_t irq_num; uintptr_t err_irq_num; uint8_t fm_id; t_FmExceptionsCallback *exception_callback; t_FmBusErrorCallback *bus_error_callback; }; /** * @group FMan private methods/members. * @{ */ /** * Frame Manager firmware. * We use the same firmware for both P3041 and P2041 devices. */ const uint32_t fman_firmware[] = FMAN_UC_IMG; const uint32_t fman_firmware_size = sizeof(fman_firmware); int -fman_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +fman_activate_resource(device_t bus, device_t child, struct resource *res) { struct fman_softc *sc; bus_space_tag_t bt; bus_space_handle_t bh; int i, rv; sc = device_get_softc(bus); - if (type != SYS_RES_IRQ) { + if (rman_get_type(res) != SYS_RES_IRQ) { for (i = 0; i < sc->sc_base.nranges; i++) { if (rman_is_region_manager(res, &sc->rman) != 0) { bt = rman_get_bustag(sc->mem_res); rv = bus_space_subregion(bt, rman_get_bushandle(sc->mem_res), rman_get_start(res) - rman_get_start(sc->mem_res), rman_get_size(res), &bh); if (rv != 0) return (rv); rman_set_bustag(res, bt); rman_set_bushandle(res, bh); return (rman_activate_resource(res)); } } return (EINVAL); } - return (bus_generic_activate_resource(bus, child, type, rid, res)); + return (bus_generic_activate_resource(bus, child, res)); } int fman_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct resource_list *rl; struct resource_list_entry *rle; int passthrough, rv; passthrough = (device_get_parent(child) != bus); rl = BUS_GET_RESOURCE_LIST(bus, child); if (type != SYS_RES_IRQ) { if ((rman_get_flags(res) & RF_ACTIVE) != 0) { - rv = bus_deactivate_resource(child, type, rid, res); + rv = bus_deactivate_resource(child, res); if (rv != 0) return (rv); } rv = rman_release_resource(res); if (rv != 0) return (rv); if (!passthrough) { rle = resource_list_find(rl, type, rid); KASSERT(rle != NULL, ("%s: resource entry not found!", __func__)); KASSERT(rle->res != NULL, ("%s: resource entry is not busy", __func__)); rle->res = NULL; } return (0); } return (resource_list_release(rl, bus, child, type, rid, res)); } struct resource * fman_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct fman_softc *sc; struct resource_list *rl; struct resource_list_entry *rle = NULL; struct resource *res; int i, isdefault, passthrough; isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); sc = device_get_softc(bus); rl = BUS_GET_RESOURCE_LIST(bus, child); switch (type) { case SYS_RES_MEMORY: KASSERT(!(isdefault && passthrough), ("%s: passthrough of default allocation", __func__)); if (!passthrough) { rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); KASSERT(rle->res == NULL, ("%s: resource entry is busy", __func__)); if (isdefault) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } } res = NULL; /* Map fman ranges to nexus ranges.
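 * An illustrative (hypothetical) example: given a ranges entry with
 * bus = 0x0 and size = 0x100000, and mem_res starting at physical
 * 0xffe400000, a child request for [0x88000, 0x88fff] is rebased to
 * [0xffe488000, 0xffe488fff] by adding rman_get_start(sc->mem_res)
 * before the reservation is made below.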
*/ for (i = 0; i < sc->sc_base.nranges; i++) { if (start >= sc->sc_base.ranges[i].bus && end < sc->sc_base.ranges[i].bus + sc->sc_base.ranges[i].size) { start += rman_get_start(sc->mem_res); end += rman_get_start(sc->mem_res); res = rman_reserve_resource(&sc->rman, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_type(res, type); - if ((flags & RF_ACTIVE) != 0 && bus_activate_resource( - child, type, *rid, res) != 0) { + if ((flags & RF_ACTIVE) != 0 && + bus_activate_resource(child, res) != 0) { rman_release_resource(res); return (NULL); } break; } } if (!passthrough) rle->res = res; return (res); case SYS_RES_IRQ: return (resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags)); } return (NULL); } static int fman_fill_ranges(phandle_t node, struct simplebus_softc *sc) { int host_address_cells; cell_t *base_ranges; ssize_t nbase_ranges; int err; int i, j, k; err = OF_searchencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); if (err <= 0) return (-1); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges < 0) return (-1); sc->nranges = nbase_ranges / sizeof(cell_t) / (sc->acells + host_address_cells + sc->scells); if (sc->nranges == 0) return (0); sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), M_DEVBUF, M_WAITOK); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { sc->ranges[i].bus = 0; for (k = 0; k < sc->acells; k++) { sc->ranges[i].bus <<= 32; sc->ranges[i].bus |= base_ranges[j++]; } sc->ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { sc->ranges[i].host <<= 32; sc->ranges[i].host |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < sc->scells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (sc->nranges); } static t_Handle fman_init(struct fman_softc *sc, struct fman_config *cfg) { phandle_t node; t_FmParams fm_params; t_Handle muram_handle, fm_handle; t_Error error; t_FmRevisionInfo revision_info; uint16_t clock; uint32_t tmp, mod; /* MURAM configuration */ muram_handle = FM_MURAM_ConfigAndInit(cfg->mem_base_addr + FMAN_MURAM_OFF, FMAN_MURAM_SIZE); if (muram_handle == NULL) { device_printf(cfg->fman_device, "couldn't init FM MURAM module" "\n"); return (NULL); } sc->muram_handle = muram_handle; /* Fill in FM configuration */ fm_params.fmId = cfg->fm_id; /* XXX we support only one partition thus each fman has master id */ fm_params.guestId = NCSW_MASTER_ID; fm_params.baseAddr = cfg->mem_base_addr; fm_params.h_FmMuram = muram_handle; /* Get FMan clock in Hz */ if ((tmp = fman_get_clock(sc)) == 0) return (NULL); /* Convert FMan clock to MHz */ clock = (uint16_t)(tmp / 1000000); mod = tmp % 1000000; if (mod >= 500000) ++clock; fm_params.fmClkFreq = clock; fm_params.f_Exception = cfg->exception_callback; fm_params.f_BusError = cfg->bus_error_callback; fm_params.h_App = cfg->fman_device; fm_params.irq = cfg->irq_num; fm_params.errIrq = cfg->err_irq_num; fm_params.firmware.size = fman_firmware_size; fm_params.firmware.p_Code = (uint32_t*)fman_firmware; fm_handle = FM_Config(&fm_params); if (fm_handle == NULL) { device_printf(cfg->fman_device, "couldn't configure FM " "module\n"); goto err; } FM_ConfigResetOnInit(fm_handle, TRUE); error = FM_Init(fm_handle); if (error != E_OK) { device_printf(cfg->fman_device, "couldn't init FM module\n"); goto err2; } error = FM_GetRevision(fm_handle, &revision_info); if (error != E_OK) {
device_printf(cfg->fman_device, "couldn't get FM revision\n"); goto err2; } device_printf(cfg->fman_device, "Hardware version: %d.%d.\n", revision_info.majorRev, revision_info.minorRev); /* Initialize the simplebus part of things */ simplebus_init(sc->sc_base.dev, 0); node = ofw_bus_get_node(sc->sc_base.dev); fman_fill_ranges(node, &sc->sc_base); sc->rman.rm_type = RMAN_ARRAY; sc->rman.rm_descr = "FMan range"; rman_init_from_resource(&sc->rman, sc->mem_res); for (node = OF_child(node); node > 0; node = OF_peer(node)) { simplebus_add_device(sc->sc_base.dev, node, 0, NULL, -1, NULL); } return (fm_handle); err2: FM_Free(fm_handle); err: FM_MURAM_Free(muram_handle); return (NULL); } static void fman_exception_callback(t_Handle app_handle, e_FmExceptions exception) { struct fman_softc *sc; sc = app_handle; device_printf(sc->sc_base.dev, "FMan exception occurred.\n"); } static void fman_error_callback(t_Handle app_handle, e_FmPortType port_type, uint8_t port_id, uint64_t addr, uint8_t tnum, uint16_t liodn) { struct fman_softc *sc; sc = app_handle; device_printf(sc->sc_base.dev, "FMan error occurred.\n"); } /** @} */ /** * @group FMan driver interface. * @{ */ int fman_get_handle(device_t dev, t_Handle *fmh) { struct fman_softc *sc = device_get_softc(dev); *fmh = sc->fm_handle; return (0); } int fman_get_muram_handle(device_t dev, t_Handle *muramh) { struct fman_softc *sc = device_get_softc(dev); *muramh = sc->muram_handle; return (0); } int fman_get_bushandle(device_t dev, vm_offset_t *fm_base) { struct fman_softc *sc = device_get_softc(dev); *fm_base = rman_get_bushandle(sc->mem_res); return (0); } int fman_attach(device_t dev) { struct fman_softc *sc; struct fman_config cfg; pcell_t qchan_range[2]; phandle_t node; sc = device_get_softc(dev); sc->sc_base.dev = dev; /* Check if MallocSmart allocator is ready */ if (XX_MallocSmartInit() != E_OK) { device_printf(dev, "could not initialize smart allocator.\n"); return (ENXIO); } node = ofw_bus_get_node(dev); if (OF_getencprop(node, "fsl,qman-channel-range", qchan_range, sizeof(qchan_range)) <= 0) { device_printf(dev, "Missing QMan channel range property!\n"); return (ENXIO); } sc->qman_chan_base = qchan_range[0]; sc->qman_chan_count = qchan_range[1]; sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->mem_res) { device_printf(dev, "could not allocate memory.\n"); return (ENXIO); } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "could not allocate interrupt.\n"); goto err; } sc->err_irq_rid = 1; sc->err_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->err_irq_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->err_irq_res) { device_printf(dev, "could not allocate error interrupt.\n"); goto err; } /* Set FMan configuration */ cfg.fman_device = dev; cfg.fm_id = device_get_unit(dev); cfg.mem_base_addr = rman_get_bushandle(sc->mem_res); cfg.irq_num = (uintptr_t)sc->irq_res; cfg.err_irq_num = (uintptr_t)sc->err_irq_res; cfg.exception_callback = fman_exception_callback; cfg.bus_error_callback = fman_error_callback; sc->fm_handle = fman_init(sc, &cfg); if (sc->fm_handle == NULL) { device_printf(dev, "could not be configured\n"); goto err; } return (bus_generic_attach(dev)); err: fman_detach(dev); return (ENXIO); } int fman_detach(device_t dev) { struct fman_softc *sc; sc = device_get_softc(dev); if (sc->muram_handle) { FM_MURAM_Free(sc->muram_handle); } if (sc->fm_handle) { FM_Free(sc->fm_handle); } 
if (sc->mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); } if (sc->err_irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->err_irq_rid, sc->err_irq_res); } return (0); } int fman_suspend(device_t dev) { return (0); } int fman_resume_dev(device_t dev) { return (0); } int fman_shutdown(device_t dev) { return (0); } int fman_qman_channel_id(device_t dev, int port) { struct fman_softc *sc; int qman_port_id[] = {0x31, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}; int i; sc = device_get_softc(dev); for (i = 0; i < sc->qman_chan_count; i++) { if (qman_port_id[i] == port) return (sc->qman_chan_base + i); } return (0); } /** @} */ diff --git a/sys/dev/dpaa/fman.h b/sys/dev/dpaa/fman.h index c7b57c60171d..b201b9fd9355 100644 --- a/sys/dev/dpaa/fman.h +++ b/sys/dev/dpaa/fman.h @@ -1,77 +1,77 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef FMAN_H_ #define FMAN_H_ #include /** * FMan driver instance data. */ struct fman_softc { struct simplebus_softc sc_base; struct resource *mem_res; struct resource *irq_res; struct resource *err_irq_res; struct rman rman; int mem_rid; int irq_rid; int err_irq_rid; int qman_chan_base; int qman_chan_count; t_Handle fm_handle; t_Handle muram_handle; }; /** * @group QMan bus interface.
* @{ */ struct resource * fman_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int fman_activate_resource(device_t bus, device_t child, - int type, int rid, struct resource *res); + struct resource *res); int fman_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res); int fman_attach(device_t dev); int fman_detach(device_t dev); int fman_suspend(device_t dev); int fman_resume_dev(device_t dev); int fman_shutdown(device_t dev); int fman_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); int fman_qman_channel_id(device_t, int); /** @} */ uint32_t fman_get_clock(struct fman_softc *sc); int fman_get_handle(device_t dev, t_Handle *fmh); int fman_get_muram_handle(device_t dev, t_Handle *muramh); int fman_get_bushandle(device_t dev, vm_offset_t *fm_base); #endif /* FMAN_H_ */ diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c index 0dbb282399ae..3e0920cbf8a5 100644 --- a/sys/dev/dpaa2/dpaa2_mc.c +++ b/sys/dev/dpaa2/dpaa2_mc.c @@ -1,892 +1,888 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Management Complex (MC) bus driver. * * MC is a hardware resource manager which can be found in several NXP * SoCs (LX2160A, for example) and provides an access to the specialized * hardware objects used in network-oriented packet processing applications. */ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #include #include #endif #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" /* Macros to read/write MC registers */ #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) #define IORT_DEVICE_NAME "MCE" /* MC Registers */ #define MC_REG_GCR1 0x0000u #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? 
*/ #define MC_REG_GSR 0x0008u #define MC_REG_FAPR 0x0028u /* General Control Register 1 (GCR1) */ #define GCR1_P1_STOP 0x80000000u #define GCR1_P2_STOP 0x40000000u /* General Status Register (GSR) */ #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) #define GSR_MCS(v) (((v) & 0xFFu) >> 0) /* Timeouts to wait for the MC status. */ #define MC_STAT_TIMEOUT 1000u /* us */ #define MC_STAT_ATTEMPTS 100u /** * @brief Structure to describe a DPAA2 device as a managed resource. */ struct dpaa2_mc_devinfo { STAILQ_ENTRY(dpaa2_mc_devinfo) link; device_t dpaa2_dev; uint32_t flags; uint32_t owners; }; MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); static struct resource_spec dpaa2_mc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, RESOURCE_SPEC_END }; static u_int dpaa2_mc_get_xref(device_t, device_t); static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, uint32_t *); /* * For device interface. */ int dpaa2_mc_attach(device_t dev) { struct dpaa2_mc_softc *sc; struct resource_map_request req; uint32_t val; int error; sc = device_get_softc(dev); sc->dev = dev; sc->msi_allocated = false; sc->msi_owner = NULL; error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources\n", __func__); return (ENXIO); } if (sc->res[1]) { resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], &req, &sc->map[1]); if (error) { device_printf(dev, "%s: failed to map control " "registers\n", __func__); dpaa2_mc_detach(dev); return (ENXIO); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ val = mcreg_read_4(sc, MC_REG_GCR1) & ~(GCR1_P1_STOP | GCR1_P2_STOP); mcreg_write_4(sc, MC_REG_GCR1, val); /* Poll MC status. */ if (bootverbose) device_printf(dev, "polling MC status...\n"); for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { val = mcreg_read_4(sc, MC_REG_GSR); if (GSR_MCS(val) != 0u) break; DELAY(MC_STAT_TIMEOUT); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); } /* At least 64 bytes of the command portal should be available. */ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { device_printf(dev, "%s: MC portal memory region too small: " "%jd\n", __func__, rman_get_size(sc->res[0])); dpaa2_mc_detach(dev); return (ENXIO); } /* Map MC portal memory resource. */ resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req, &sc->map[0]); if (error) { device_printf(dev, "Failed to map MC portal memory\n"); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 I/O objects. 
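 *
 * Note that these rmans do not track address ranges: each managed region is
 * a single DPAA2 device, with the device_t itself cast to rman_res_t (see
 * dpaa2_mc_manage_dev() below, which effectively does
 * rman_manage_region(rm, (rman_res_t)dev, (rman_res_t)dev)).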
*/ sc->dpio_rman.rm_type = RMAN_ARRAY; sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; error = rman_init(&sc->dpio_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 I/O objects: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 buffer pools. */ sc->dpbp_rman.rm_type = RMAN_ARRAY; sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; error = rman_init(&sc->dpbp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 buffer pools: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 concentrators. */ sc->dpcon_rman.rm_type = RMAN_ARRAY; sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; error = rman_init(&sc->dpcon_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 concentrators: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 MC portals. */ sc->dpmcp_rman.rm_type = RMAN_ARRAY; sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; error = rman_init(&sc->dpmcp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 MC portals: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a list of non-allocatable DPAA2 devices. */ mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); STAILQ_INIT(&sc->mdev_list); mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); /* * Add a root resource container as the only child of the bus. All of * the direct descendant containers will be attached to the root one * instead of the MC device. */ sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); if (sc->rcdev == NULL) { dpaa2_mc_detach(dev); return (ENXIO); } bus_generic_probe(dev); bus_generic_attach(dev); return (0); } int dpaa2_mc_detach(device_t dev) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; bus_generic_detach(dev); sc = device_get_softc(dev); if (sc->rcdev) device_delete_child(dev, sc->rcdev); bus_release_resources(dev, dpaa2_mc_spec, sc->res); dinfo = device_get_ivars(dev); if (dinfo) free(dinfo, M_DPAA2_MC); error = bus_generic_detach(dev); if (error != 0) return (error); return (device_delete_children(dev)); } /* * For bus interface. */ struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; struct rman *rm; int error; rm = dpaa2_mc_rman(mcdev, type, flags); if (rm == NULL) return (bus_generic_alloc_resource(mcdev, child, type, rid, start, end, count, flags)); /* * Skip managing DPAA2-specific resource. It must be provided to MC by * calling DPAA2_MC_MANAGE_DEV() beforehand. 
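 * A provider typically registers such a device roughly as follows (sketch;
 * dpio_dev is an illustrative name):
 *
 *	DPAA2_MC_MANAGE_DEV(mcdev, dpio_dev, DPAA2_MC_DEV_ALLOCATABLE);
 *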
*/ if (type <= DPAA2_DEV_MC) { error = rman_manage_region(rm, start, end); if (error) { device_printf(mcdev, "rman_manage_region() failed: " "start=%#jx, end=%#jx, error=%d\n", start, end, error); goto fail; } } res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start, end, count, flags); if (res == NULL) goto fail; return (res); fail: device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); return (NULL); } int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_adjust_resource(mcdev, child, r, start, end)); return (bus_generic_adjust_resource(mcdev, child, r, start, end)); } int dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); if (rm) return (bus_generic_rman_release_resource(mcdev, child, type, rid, r)); return (bus_generic_release_resource(mcdev, child, type, rid, r)); } int -dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid, - struct resource *r) +dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; - rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); + rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) - return (bus_generic_rman_activate_resource(mcdev, child, type, - rid, r)); - return (bus_generic_activate_resource(mcdev, child, type, rid, r)); + return (bus_generic_rman_activate_resource(mcdev, child, r)); + return (bus_generic_activate_resource(mcdev, child, r)); } int -dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid, - struct resource *r) +dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; - rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); + rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) - return (bus_generic_rman_deactivate_resource(mcdev, child, type, - rid, r)); - return (bus_generic_deactivate_resource(mcdev, child, type, rid, r)); + return (bus_generic_rman_deactivate_resource(mcdev, child, r)); + return (bus_generic_deactivate_resource(mcdev, child, r)); } /* * For pseudo-pcib interface. */ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { #if defined(INTRNG) return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); #else return (ENXIO); #endif } int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id) { struct dpaa2_devinfo *dinfo; dinfo = device_get_ivars(child); if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (ENXIO); if (type == PCI_ID_MSI) return (dpaa2_mc_map_id(mcdev, child, id)); *id = dinfo->icid; return (0); } /* * For DPAA2 Management Complex bus driver interface. 
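 *
 * Allocatable objects registered via dpaa2_mc_manage_dev() are later handed
 * out through dpaa2_mc_get_free_dev(). A consumer sketch (dpio_dev is an
 * illustrative name):
 *
 *	device_t dpio_dev;
 *
 *	if (dpaa2_mc_get_free_dev(mcdev, &dpio_dev, DPAA2_DEV_IO) == 0)
 *		... dpio_dev now refers to an unused DPIO object ...
 *
 * Since each managed region is a single device_t, the underlying
 * rman_first_free_region() lookup always yields start == end.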
*/ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; struct rman *rm; int error; sc = device_get_softc(mcdev); dinfo = device_get_ivars(dpaa2_dev); if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); if (!di) return (ENOMEM); di->dpaa2_dev = dpaa2_dev; di->flags = flags; di->owners = 0; /* Append a new managed DPAA2 device to the queue. */ mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); mtx_unlock(&sc->mdev_lock); if (flags & DPAA2_MC_DEV_ALLOCATABLE) { /* Select rman based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); if (!rm) return (ENOENT); /* Manage DPAA2 device as an allocatable resource. */ error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev); if (error) return (error); } return (0); } int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct rman *rm; rman_res_t start, end; int error; if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); /* Select resource manager based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, devtype, 0); if (!rm) return (ENOENT); /* Find first free DPAA2 device of the given type. */ error = rman_first_free_region(rm, &start, &end); if (error) return (error); KASSERT(start == end, ("start != end, but should be the same pointer " "to the DPAA2 device: start=%jx, end=%jx", start, end)); *dpaa2_dev = (device_t) start; return (0); } int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if (dinfo->dtype == devtype && dinfo->id == obj_id) { *dpaa2_dev = di->dpaa2_dev; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; device_t dev = NULL; uint32_t owners = UINT32_MAX; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if ((dinfo->dtype == devtype) && (di->flags & DPAA2_MC_DEV_SHAREABLE) && (di->owners < owners)) { dev = di->dpaa2_dev; owners = di->owners; } } if (dev) { *dpaa2_dev = dev; error = 0; } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners++; error = 0; break; } } 
mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners -= di->owners > 0 ? 1 : 0; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } /** * @internal */ static u_int dpaa2_mc_get_xref(device_t mcdev, device_t child) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); #ifdef DEV_ACPI u_int xref, devid; #endif #ifdef FDT phandle_t msi_parent; #endif int error; if (sc && dinfo) { #ifdef DEV_ACPI if (sc->acpi_based) { /* * NOTE: The first named component from the IORT table * with the given name (as a substring) will be used. */ error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error) return (0); return (xref); } #endif #ifdef FDT if (!sc->acpi_based) { /* FDT-based driver. */ error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, &msi_parent, NULL); if (error) return (0); return ((u_int) msi_parent); } #endif } return (0); } /** * @internal */ static u_int dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) { struct dpaa2_devinfo *dinfo; #ifdef DEV_ACPI u_int xref, devid; int error; #endif dinfo = device_get_ivars(child); if (dinfo) { /* * The first named components from IORT table with the given * name (as a substring) will be used. */ #ifdef DEV_ACPI error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error == 0) *id = devid; else #endif *id = dinfo->icid; /* RID not in IORT, likely FW bug */ return (0); } return (ENXIO); } /** * @internal * @brief Obtain a resource manager based on the given type of the resource. */ struct rman * dpaa2_mc_rman(device_t mcdev, int type, u_int flags) { struct dpaa2_mc_softc *sc; sc = device_get_softc(mcdev); switch (type) { case DPAA2_DEV_IO: return (&sc->dpio_rman); case DPAA2_DEV_BP: return (&sc->dpbp_rman); case DPAA2_DEV_CON: return (&sc->dpcon_rman); case DPAA2_DEV_MCP: return (&sc->dpmcp_rman); default: break; } return (NULL); } #if defined(INTRNG) && !defined(IOMMU) /** * @internal * @brief Allocates requested number of MSIs. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int msi_irqs[DPAA2_MC_MSI_COUNT]; int error; /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ if (!sc->msi_allocated) { error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); if (error) { device_printf(mcdev, "failed to pre-allocate %d MSIs: " "error=%d\n", DPAA2_MC_MSI_COUNT, error); return (error); } mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { sc->msi[i].child = NULL; sc->msi[i].irq = msi_irqs[i]; } sc->msi_owner = child; sc->msi_allocated = true; mtx_unlock(&sc->msi_lock); } error = ENOENT; /* Find the first free MSIs from the pre-allocated pool. 
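 * The scan is first-fit: the first slot with a NULL child starts a run of
 * `count' consecutive IRQs, all bound to the requesting child. For example,
 * with DPAA2_MC_MSI_COUNT of 32 and slots 0-30 already taken, a request for
 * count = 2 would start at slot 31, run past the end of the pool, and fail
 * with E2BIG.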
*/ mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != NULL) continue; error = 0; for (int j = 0; j < count; j++) { if (i + j >= DPAA2_MC_MSI_COUNT) { device_printf(mcdev, "requested %d MSIs exceed " "limit of %d available\n", count, DPAA2_MC_MSI_COUNT); error = E2BIG; break; } sc->msi[i + j].child = child; irqs[j] = sc->msi[i + j].irq; } break; } mtx_unlock(&sc->msi_lock); return (error); } /** * @internal * @brief Marks IRQs as free in the pre-allocated pool of MSIs. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. * NOTE: MSIs are kept allocated in the kernel as a part of the pool. */ static int dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != child) continue; for (int j = 0; j < count; j++) { if (sc->msi[i].irq == irqs[j]) { sc->msi[i].child = NULL; break; } } } mtx_unlock(&sc->msi_lock); return (0); } /** * @internal * @brief Provides address to write to and data according to the given MSI from * the pre-allocated pool. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int error = EINVAL; mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child == child && sc->msi[i].irq == irq) { error = 0; break; } } mtx_unlock(&sc->msi_lock); if (error) return (error); return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, sc->msi_owner), irq, addr, data)); } #endif /* defined(INTRNG) && !defined(IOMMU) */ static device_method_t dpaa2_mc_methods[] = { DEVMETHOD_END }; DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, sizeof(struct dpaa2_mc_softc)); diff --git a/sys/dev/dpaa2/dpaa2_mc.h b/sys/dev/dpaa2/dpaa2_mc.h index 23b18f8d2ca6..7af3b2a4eb24 100644 --- a/sys/dev/dpaa2/dpaa2_mc.h +++ b/sys/dev/dpaa2/dpaa2_mc.h @@ -1,219 +1,219 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_MC_H #define _DPAA2_MC_H #include #include #include #include #include #include "pci_if.h" #include "dpaa2_types.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_ni.h" #include "dpaa2_io.h" #include "dpaa2_mac.h" #include "dpaa2_con.h" #include "dpaa2_bp.h" /* * Maximum number of MSIs supported by the MC for its children without IOMMU. * * TODO: Should be much more with IOMMU translation. */ #define DPAA2_MC_MSI_COUNT 32 /* Flags for DPAA2 devices as resources. */ #define DPAA2_MC_DEV_ALLOCATABLE 0x01u /* to be managed by DPAA2-specific rman */ #define DPAA2_MC_DEV_ASSOCIATED 0x02u /* to obtain info about DPAA2 device */ #define DPAA2_MC_DEV_SHAREABLE 0x04u /* to be shared among DPAA2 devices */ struct dpaa2_mc_devinfo; /* about managed DPAA2 devices */ /** * @brief Software context for the DPAA2 Management Complex (MC) driver. * * dev: Device associated with this software context. * rcdev: Child device associated with the root resource container. * acpi_based: Attached using ACPI (true) or FDT (false). * ofw_node: FDT node of the Management Complex (acpi_based == false). * * res: Unmapped MC command portal and control registers resources. * map: Mapped MC command portal and control registers resources. * * dpio_rman: I/O objects resource manager. * dpbp_rman: Buffer Pools resource manager. * dpcon_rman: Concentrators resource manager. * dpmcp_rman: MC portals resource manager. */ struct dpaa2_mc_softc { device_t dev; device_t rcdev; bool acpi_based; phandle_t ofw_node; struct resource *res[2]; struct resource_map map[2]; /* For allocatable managed DPAA2 objects. */ struct rman dpio_rman; struct rman dpbp_rman; struct rman dpcon_rman; struct rman dpmcp_rman; /* For managed DPAA2 objects. */ struct mtx mdev_lock; STAILQ_HEAD(, dpaa2_mc_devinfo) mdev_list; /* NOTE: Workaround in case of no IOMMU available. */ #ifndef IOMMU device_t msi_owner; bool msi_allocated; struct mtx msi_lock; struct { device_t child; int irq; } msi[DPAA2_MC_MSI_COUNT]; #endif }; /** * @brief Software context for the DPAA2 Resource Container (RC) driver. * * dev: Device associated with this software context. * portal: Helper object to send commands to the MC portal. * unit: Helps to distinguish between root (0) and child DRPCs. * cont_id: Container ID. */ struct dpaa2_rc_softc { device_t dev; int unit; uint32_t cont_id; }; /** * @brief Information about MSI messages supported by the DPAA2 object. * * msi_msgnum: Number of MSI messages supported by the DPAA2 object. * msi_alloc: Number of MSI messages allocated for the DPAA2 object. * msi_handlers: Number of MSI message handlers configured. */ struct dpaa2_msinfo { uint8_t msi_msgnum; uint8_t msi_alloc; uint32_t msi_handlers; }; /** * @brief Information about DPAA2 device. * * pdev: Parent device. * dev: Device this devinfo is associated with. * * id: ID of a logical DPAA2 object resource. * portal_id: ID of the MC portal which belongs to the object's container. * icid: Isolation context ID of the DPAA2 object. 
It is shared * between a resource container and all of its children. * * dtype: Type of the DPAA2 object. * resources: Resources available for this DPAA2 device. * msi: Information about MSI messages supported by the DPAA2 object. */ struct dpaa2_devinfo { device_t pdev; device_t dev; uint32_t id; uint32_t portal_id; uint32_t icid; enum dpaa2_dev_type dtype; struct resource_list resources; struct dpaa2_msinfo msi; /* * DPAA2 object might or might not have its own portal allocated to * execute MC commands. If the portal has been allocated, it takes * precedence over the portal owned by the resource container. */ struct dpaa2_mcp *portal; }; DECLARE_CLASS(dpaa2_mc_driver); /* For device interface. */ int dpaa2_mc_attach(device_t dev); int dpaa2_mc_detach(device_t dev); /* For bus interface. */ struct rman *dpaa2_mc_rman(device_t mcdev, int type, u_int flags); struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); int dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r); -int dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, - int rid, struct resource *r); -int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, - int rid, struct resource *r); +int dpaa2_mc_activate_resource(device_t mcdev, device_t child, + struct resource *r); +int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, + struct resource *r); /* For pseudo-pcib interface. */ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs); int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs); int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data); int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id); /* For DPAA2 MC bus interface. */ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags); int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id); int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype); #endif /* _DPAA2_MC_H */ diff --git a/sys/dev/exca/exca.c b/sys/dev/exca/exca.c index 5a5a68b7dc62..98e0ffdf9d18 100644 --- a/sys/dev/exca/exca.c +++ b/sys/dev/exca/exca.c @@ -1,933 +1,933 @@ /*- * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause * * Copyright (c) 2002-2005 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This software may be derived from NetBSD i82365.c and other files with * the following copyright: * * Copyright (c) 1997 Marc Horowitz. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Marc Horowitz. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EXCA_DEBUG #define DEVPRINTF(dev, fmt, args...) device_printf((dev), (fmt), ## args) #define DPRINTF(fmt, args...) printf(fmt, ## args) #else #define DEVPRINTF(dev, fmt, args...) #define DPRINTF(fmt, args...) 
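/*
 * With EXCA_DEBUG defined at compile time (a build-time #define, not a
 * standard kernel option), DEVPRINTF()/DPRINTF() expand to
 * device_printf()/printf() and the register-dump blocks below are compiled
 * in; without it both macros expand to nothing.
 */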
#endif static const char *chip_names[] = { "CardBus socket", "Intel i82365SL-A/B or clone", "Intel i82365sl-DF step", "VLSI chip", "Cirrus Logic PD6710", "Cirrus logic PD6722", "Cirrus Logic PD6729", "Vadem 365", "Vadem 465", "Vadem 468", "Vadem 469", "Ricoh RF5C296", "Ricoh RF5C396", "IBM clone", "IBM KING PCMCIA Controller" }; static exca_getb_fn exca_mem_getb; static exca_putb_fn exca_mem_putb; static exca_getb_fn exca_io_getb; static exca_putb_fn exca_io_putb; /* memory */ #define EXCA_MEMINFO(NUM) { \ EXCA_SYSMEM_ADDR ## NUM ## _START_LSB, \ EXCA_SYSMEM_ADDR ## NUM ## _START_MSB, \ EXCA_SYSMEM_ADDR ## NUM ## _STOP_LSB, \ EXCA_SYSMEM_ADDR ## NUM ## _STOP_MSB, \ EXCA_SYSMEM_ADDR ## NUM ## _WIN, \ EXCA_CARDMEM_ADDR ## NUM ## _LSB, \ EXCA_CARDMEM_ADDR ## NUM ## _MSB, \ EXCA_ADDRWIN_ENABLE_MEM ## NUM, \ } static struct mem_map_index_st { int sysmem_start_lsb; int sysmem_start_msb; int sysmem_stop_lsb; int sysmem_stop_msb; int sysmem_win; int cardmem_lsb; int cardmem_msb; int memenable; } mem_map_index[] = { EXCA_MEMINFO(0), EXCA_MEMINFO(1), EXCA_MEMINFO(2), EXCA_MEMINFO(3), EXCA_MEMINFO(4) }; #undef EXCA_MEMINFO static uint8_t exca_mem_getb(struct exca_softc *sc, int reg) { return (bus_space_read_1(sc->bst, sc->bsh, sc->offset + reg)); } static void exca_mem_putb(struct exca_softc *sc, int reg, uint8_t val) { bus_space_write_1(sc->bst, sc->bsh, sc->offset + reg, val); } static uint8_t exca_io_getb(struct exca_softc *sc, int reg) { bus_space_write_1(sc->bst, sc->bsh, EXCA_REG_INDEX, reg + sc->offset); return (bus_space_read_1(sc->bst, sc->bsh, EXCA_REG_DATA)); } static void exca_io_putb(struct exca_softc *sc, int reg, uint8_t val) { bus_space_write_1(sc->bst, sc->bsh, EXCA_REG_INDEX, reg + sc->offset); bus_space_write_1(sc->bst, sc->bsh, EXCA_REG_DATA, val); } /* * Helper function. This will map the requested memory slot. We setup the * map before we call this function. This is used to initially force the * mapping, as well as later restore the mapping after it has been destroyed * in some fashion (due to a power event typically). */ static void exca_do_mem_map(struct exca_softc *sc, int win) { struct mem_map_index_st *map; struct pccard_mem_handle *mem; uint32_t offset; uint32_t mem16; uint32_t attrmem; map = &mem_map_index[win]; mem = &sc->mem[win]; mem16 = (mem->kind & PCCARD_MEM_16BIT) ? EXCA_SYSMEM_ADDRX_START_MSB_DATASIZE_16BIT : 0; attrmem = (mem->kind & PCCARD_MEM_ATTR) ? EXCA_CARDMEM_ADDRX_MSB_REGACTIVE_ATTR : 0; offset = ((mem->cardaddr >> EXCA_CARDMEM_ADDRX_SHIFT) - (mem->addr >> EXCA_SYSMEM_ADDRX_SHIFT)) & 0x3fff; exca_putb(sc, map->sysmem_start_lsb, mem->addr >> EXCA_SYSMEM_ADDRX_SHIFT); exca_putb(sc, map->sysmem_start_msb, ((mem->addr >> (EXCA_SYSMEM_ADDRX_SHIFT + 8)) & EXCA_SYSMEM_ADDRX_START_MSB_ADDR_MASK) | mem16); exca_putb(sc, map->sysmem_stop_lsb, (mem->addr + mem->realsize - 1) >> EXCA_SYSMEM_ADDRX_SHIFT); exca_putb(sc, map->sysmem_stop_msb, (((mem->addr + mem->realsize - 1) >> (EXCA_SYSMEM_ADDRX_SHIFT + 8)) & EXCA_SYSMEM_ADDRX_STOP_MSB_ADDR_MASK) | EXCA_SYSMEM_ADDRX_STOP_MSB_WAIT2); exca_putb(sc, map->sysmem_win, mem->addr >> EXCA_MEMREG_WIN_SHIFT); exca_putb(sc, map->cardmem_lsb, offset & 0xff); exca_putb(sc, map->cardmem_msb, ((offset >> 8) & EXCA_CARDMEM_ADDRX_MSB_ADDR_MASK) | attrmem); DPRINTF("%s %d-bit memory", mem->kind & PCCARD_MEM_ATTR ? "attribute" : "common", mem->kind & PCCARD_MEM_16BIT ? 
16 : 8); exca_setb(sc, EXCA_ADDRWIN_ENABLE, map->memenable | EXCA_ADDRWIN_ENABLE_MEMCS16); DELAY(100); #ifdef EXCA_DEBUG { int r1, r2, r3, r4, r5, r6, r7; r1 = exca_getb(sc, map->sysmem_start_msb); r2 = exca_getb(sc, map->sysmem_start_lsb); r3 = exca_getb(sc, map->sysmem_stop_msb); r4 = exca_getb(sc, map->sysmem_stop_lsb); r5 = exca_getb(sc, map->cardmem_msb); r6 = exca_getb(sc, map->cardmem_lsb); r7 = exca_getb(sc, map->sysmem_win); printf("exca_do_mem_map win %d: %#02x%#02x %#02x%#02x " "%#02x%#02x %#02x (%#08x+%#06x.%#06x*%#06x) flags %#x\n", win, r1, r2, r3, r4, r5, r6, r7, mem->addr, mem->size, mem->realsize, mem->cardaddr, mem->kind); } #endif } /* * public interface to map a resource. kind is the type of memory to * map (either common or attribute). Memory created via this interface * starts out at card address 0. Since the only way to set this is * to set it on a struct resource after it has been mapped, we're safe * in making this assumption. Note that resources can be remapped using * exca_do_mem_map so that's how the card address can be set later. */ int exca_mem_map(struct exca_softc *sc, int kind, struct resource *res) { int win; for (win = 0; win < EXCA_MEM_WINS; win++) { if ((sc->memalloc & (1 << win)) == 0) { sc->memalloc |= (1 << win); break; } } if (win >= EXCA_MEM_WINS) return (ENOSPC); if (sc->flags & EXCA_HAS_MEMREG_WIN) { #ifdef __LP64__ if (rman_get_start(res) >> (EXCA_MEMREG_WIN_SHIFT + 8) != 0) { device_printf(sc->dev, "Does not support mapping above 4GB."); return (EINVAL); } #endif } else { if (rman_get_start(res) >> EXCA_MEMREG_WIN_SHIFT != 0) { device_printf(sc->dev, "Does not support mapping above 16M."); return (EINVAL); } } sc->mem[win].cardaddr = 0; sc->mem[win].memt = rman_get_bustag(res); sc->mem[win].memh = rman_get_bushandle(res); sc->mem[win].addr = rman_get_start(res); sc->mem[win].size = rman_get_end(res) - sc->mem[win].addr + 1; sc->mem[win].realsize = sc->mem[win].size + EXCA_MEM_PAGESIZE - 1; sc->mem[win].realsize = sc->mem[win].realsize - (sc->mem[win].realsize % EXCA_MEM_PAGESIZE); sc->mem[win].kind = kind; DPRINTF("exca_mem_map window %d bus %x+%x card addr %x\n", win, sc->mem[win].addr, sc->mem[win].size, sc->mem[win].cardaddr); exca_do_mem_map(sc, win); return (0); } /* * Private helper function. This turns off a given memory map that is in * use. We do this by just clearing the enable bit in the pcic. If we needed * to make memory unmapping/mapping pairs faster, we would have to store * more state information about the pcic and then use that to intelligently * do the map/unmap. However, since we don't do that sort of thing often * (generally just at configure time), it isn't a case worth optimizing. */ static void exca_mem_unmap(struct exca_softc *sc, int window) { if (window < 0 || window >= EXCA_MEM_WINS) panic("exca_mem_unmap: window out of range"); exca_clrb(sc, EXCA_ADDRWIN_ENABLE, mem_map_index[window].memenable); sc->memalloc &= ~(1 << window); } /* * Find the map that we're using to hold the resource. This works well * so long as the client drivers don't do silly things like map the same * area multiple times, or map both common and attribute memory at the * same time. This latter restriction is a bug. We likely should just * store a pointer to the res in the mem[x] data structure.
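 *
 * As implemented, a window matches only when its bus tag, start address and
 * size all equal those of the resource, so a caller that remapped a resource
 * with a different size would not find it again; lookups are of the form
 * win = exca_mem_findmap(sc, res), with -1 meaning no match.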
*/ static int exca_mem_findmap(struct exca_softc *sc, struct resource *res) { int win; for (win = 0; win < EXCA_MEM_WINS; win++) { if (sc->mem[win].memt == rman_get_bustag(res) && sc->mem[win].addr == rman_get_start(res) && sc->mem[win].size == rman_get_size(res)) return (win); } return (-1); } /* * Set the memory flag. This means that we are setting whether the memory * is coming from attribute memory or from common memory on the card. * CIS entries are generally in attribute memory (although they can * reside in common memory). Generally, this is the only use for attribute * memory. However, some cards require their drivers to dance in both * common and/or attribute memory and this interface (and setting the * offset interface) exist for such cards. */ int exca_mem_set_flags(struct exca_softc *sc, struct resource *res, uint32_t flags) { int win; win = exca_mem_findmap(sc, res); if (win < 0) { device_printf(sc->dev, "set_res_flags: specified resource not active\n"); return (ENOENT); } switch (flags) { case PCCARD_A_MEM_ATTR: sc->mem[win].kind |= PCCARD_MEM_ATTR; break; case PCCARD_A_MEM_COM: sc->mem[win].kind &= ~PCCARD_MEM_ATTR; break; case PCCARD_A_MEM_16BIT: sc->mem[win].kind |= PCCARD_MEM_16BIT; break; case PCCARD_A_MEM_8BIT: sc->mem[win].kind &= ~PCCARD_MEM_16BIT; break; } exca_do_mem_map(sc, win); return (0); } /* * Given a resource, go ahead and unmap it if we can find it in the * resource list that's used. */ int exca_mem_unmap_res(struct exca_softc *sc, struct resource *res) { int win; win = exca_mem_findmap(sc, res); if (win < 0) return (ENOENT); exca_mem_unmap(sc, win); return (0); } /* * Set the offset of the memory. We use this for reading the CIS and * frobbing the pccard's pccard registers (CCR, etc). Some drivers * need to access arbitrary attribute and common memory during their * initialization and operation.
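 *
 * The card address is rounded down to a window page boundary and the
 * remainder is returned through *deltap. For example (assuming a
 * hypothetical 4KB EXCA_MEM_PAGESIZE): cardaddr 0x1234 programs the window
 * at card address 0x1000 and returns *deltap = 0x234, which the caller adds
 * to its own resource offsets.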
*/ int exca_mem_set_offset(struct exca_softc *sc, struct resource *res, uint32_t cardaddr, uint32_t *deltap) { int win; uint32_t delta; win = exca_mem_findmap(sc, res); if (win < 0) { device_printf(sc->dev, "set_memory_offset: specified resource not active\n"); return (ENOENT); } sc->mem[win].cardaddr = rounddown2(cardaddr, EXCA_MEM_PAGESIZE); delta = cardaddr % EXCA_MEM_PAGESIZE; if (deltap) *deltap = delta; sc->mem[win].realsize = sc->mem[win].size + delta + EXCA_MEM_PAGESIZE - 1; sc->mem[win].realsize = sc->mem[win].realsize - (sc->mem[win].realsize % EXCA_MEM_PAGESIZE); exca_do_mem_map(sc, win); return (0); } /* I/O */ #define EXCA_IOINFO(NUM) { \ EXCA_IOADDR ## NUM ## _START_LSB, \ EXCA_IOADDR ## NUM ## _START_MSB, \ EXCA_IOADDR ## NUM ## _STOP_LSB, \ EXCA_IOADDR ## NUM ## _STOP_MSB, \ EXCA_ADDRWIN_ENABLE_IO ## NUM, \ EXCA_IOCTL_IO ## NUM ## _WAITSTATE \ | EXCA_IOCTL_IO ## NUM ## _ZEROWAIT \ | EXCA_IOCTL_IO ## NUM ## _IOCS16SRC_MASK \ | EXCA_IOCTL_IO ## NUM ## _DATASIZE_MASK, \ { \ EXCA_IOCTL_IO ## NUM ## _IOCS16SRC_CARD, \ EXCA_IOCTL_IO ## NUM ## _IOCS16SRC_DATASIZE \ | EXCA_IOCTL_IO ## NUM ## _DATASIZE_8BIT, \ EXCA_IOCTL_IO ## NUM ## _IOCS16SRC_DATASIZE \ | EXCA_IOCTL_IO ## NUM ## _DATASIZE_16BIT, \ } \ } static struct io_map_index_st { int start_lsb; int start_msb; int stop_lsb; int stop_msb; int ioenable; int ioctlmask; int ioctlbits[3]; /* indexed by PCCARD_WIDTH_* */ } io_map_index[] = { EXCA_IOINFO(0), EXCA_IOINFO(1), }; #undef EXCA_IOINFO static void exca_do_io_map(struct exca_softc *sc, int win) { struct io_map_index_st *map; struct pccard_io_handle *io; map = &io_map_index[win]; io = &sc->io[win]; exca_putb(sc, map->start_lsb, io->addr & 0xff); exca_putb(sc, map->start_msb, (io->addr >> 8) & 0xff); exca_putb(sc, map->stop_lsb, (io->addr + io->size - 1) & 0xff); exca_putb(sc, map->stop_msb, ((io->addr + io->size - 1) >> 8) & 0xff); exca_clrb(sc, EXCA_IOCTL, map->ioctlmask); exca_setb(sc, EXCA_IOCTL, map->ioctlbits[io->width]); exca_setb(sc, EXCA_ADDRWIN_ENABLE, map->ioenable); #ifdef EXCA_DEBUG { int r1, r2, r3, r4; r1 = exca_getb(sc, map->start_msb); r2 = exca_getb(sc, map->start_lsb); r3 = exca_getb(sc, map->stop_msb); r4 = exca_getb(sc, map->stop_lsb); DPRINTF("exca_do_io_map window %d: %02x%02x %02x%02x " "(%08x+%08x)\n", win, r1, r2, r3, r4, io->addr, io->size); } #endif } int exca_io_map(struct exca_softc *sc, int width, struct resource *r) { int win; #ifdef EXCA_DEBUG static char *width_names[] = { "auto", "io8", "io16"}; #endif for (win=0; win < EXCA_IO_WINS; win++) { if ((sc->ioalloc & (1 << win)) == 0) { sc->ioalloc |= (1 << win); break; } } if (win >= EXCA_IO_WINS) return (ENOSPC); sc->io[win].iot = rman_get_bustag(r); sc->io[win].ioh = rman_get_bushandle(r); sc->io[win].addr = rman_get_start(r); sc->io[win].size = rman_get_end(r) - sc->io[win].addr + 1; sc->io[win].flags = 0; sc->io[win].width = width; DPRINTF("exca_io_map window %d %s port %x+%x\n", win, width_names[width], sc->io[win].addr, sc->io[win].size); exca_do_io_map(sc, win); return (0); } static void exca_io_unmap(struct exca_softc *sc, int window) { if (window >= EXCA_IO_WINS) panic("exca_io_unmap: window out of range"); exca_clrb(sc, EXCA_ADDRWIN_ENABLE, io_map_index[window].ioenable); sc->ioalloc &= ~(1 << window); sc->io[window].iot = 0; sc->io[window].ioh = 0; sc->io[window].addr = 0; sc->io[window].size = 0; sc->io[window].flags = 0; sc->io[window].width = 0; } static int exca_io_findmap(struct exca_softc *sc, struct resource *res) { int win; for (win = 0; win < EXCA_IO_WINS; win++) { if 
(sc->io[win].iot == rman_get_bustag(res) && sc->io[win].addr == rman_get_start(res) && sc->io[win].size == rman_get_size(res)) return (win); } return (-1); } int exca_io_unmap_res(struct exca_softc *sc, struct resource *res) { int win; win = exca_io_findmap(sc, res); if (win < 0) return (ENOENT); exca_io_unmap(sc, win); return (0); } /* Misc */ /* * If interrupts are enabled, then we should be able to just wait for * an interrupt routine to wake us up. Busy waiting shouldn't be * necessary. Sadly, not all legacy ISA cards support an interrupt * for the busy state transitions, at least according to their datasheets, * so we busy wait a while here. */ static void exca_wait_ready(struct exca_softc *sc) { int i; DEVPRINTF(sc->dev, "exca_wait_ready: status 0x%02x\n", exca_getb(sc, EXCA_IF_STATUS)); for (i = 0; i < 10000; i++) { if (exca_getb(sc, EXCA_IF_STATUS) & EXCA_IF_STATUS_READY) return; DELAY(500); } device_printf(sc->dev, "ready never happened, status = %02x\n", exca_getb(sc, EXCA_IF_STATUS)); } /* * Reset the card. Ideally, we'd do a lot of this via interrupts. * However, many PC Cards will deassert the ready signal. This means * that they are asserting an interrupt. This makes it hard to * do anything but a busy wait here. One could argue that * such cards are broken, or that the bridge that allows this sort * of interrupt through isn't quite what you'd want (and may be a standards * violation). However, such arguing would leave a huge class of PC Cards * and bridges out of reach for use in the system. * * Maybe I should reevaluate the above based on the power bug I fixed * in OLDCARD. */ void exca_reset(struct exca_softc *sc, device_t child) { int win; /* enable socket i/o */ exca_setb(sc, EXCA_PWRCTL, EXCA_PWRCTL_OE); exca_putb(sc, EXCA_INTR, EXCA_INTR_ENABLE); /* hold reset for 30ms */ DELAY(30*1000); /* clear the reset flag */ exca_setb(sc, EXCA_INTR, EXCA_INTR_RESET); /* wait 20ms as per PC Card standard (r2.01) section 4.3.6 */ DELAY(20*1000); exca_wait_ready(sc); /* disable all address windows */ exca_putb(sc, EXCA_ADDRWIN_ENABLE, 0); exca_setb(sc, EXCA_INTR, EXCA_INTR_CARDTYPE_IO); DEVPRINTF(sc->dev, "card type is io\n"); /* reinstall all the memory and io mappings */ for (win = 0; win < EXCA_MEM_WINS; ++win) if (sc->memalloc & (1 << win)) exca_do_mem_map(sc, win); for (win = 0; win < EXCA_IO_WINS; ++win) if (sc->ioalloc & (1 << win)) exca_do_io_map(sc, win); } /* * Initialize the exca_softc data structure for the first time. */ void exca_init(struct exca_softc *sc, device_t dev, bus_space_tag_t bst, bus_space_handle_t bsh, uint32_t offset) { sc->dev = dev; sc->memalloc = 0; sc->ioalloc = 0; sc->bst = bst; sc->bsh = bsh; sc->offset = offset; sc->flags = 0; sc->getb = exca_mem_getb; sc->putb = exca_mem_putb; sc->pccarddev = device_add_child(dev, "pccard", -1); if (sc->pccarddev == NULL) DEVPRINTF(dev, "WARNING: cannot add pccard bus.\n"); else if (device_probe_and_attach(sc->pccarddev) != 0) DEVPRINTF(dev, "WARNING: cannot attach pccard bus.\n"); } /* * Is this socket valid?
*/ static int exca_valid_slot(struct exca_softc *exca) { uint8_t c; /* Assume the worst */ exca->chipset = EXCA_BOGUS; /* * see if there's a PCMCIA controller here * Intel PCMCIA controllers use 0x82 and 0x83 * IBM clone chips use 0x88 and 0x89, apparently */ c = exca_getb(exca, EXCA_IDENT); DEVPRINTF(exca->dev, "Ident is %x\n", c); if ((c & EXCA_IDENT_IFTYPE_MASK) != EXCA_IDENT_IFTYPE_MEM_AND_IO) return (0); if ((c & EXCA_IDENT_ZERO) != 0) return (0); switch (c & EXCA_IDENT_REV_MASK) { /* * 82365 or clones. */ case EXCA_IDENT_REV_I82365SLR0: case EXCA_IDENT_REV_I82365SLR1: exca->chipset = EXCA_I82365; /* * Check for Vadem chips by unlocking their extra * registers and looking for valid ID. Bit 3 in * the ID register is normally 0, except when * EXCA_VADEMREV is set. Other bridges appear * to ignore this frobbing. */ bus_space_write_1(exca->bst, exca->bsh, EXCA_REG_INDEX, EXCA_VADEM_COOKIE1); bus_space_write_1(exca->bst, exca->bsh, EXCA_REG_INDEX, EXCA_VADEM_COOKIE2); exca_setb(exca, EXCA_VADEM_VMISC, EXCA_VADEM_REV); c = exca_getb(exca, EXCA_IDENT); if (c & 0x08) { switch (c & 7) { case 1: exca->chipset = EXCA_VG365; break; case 2: exca->chipset = EXCA_VG465; break; case 3: exca->chipset = EXCA_VG468; break; default: exca->chipset = EXCA_VG469; break; } exca_clrb(exca, EXCA_VADEM_VMISC, EXCA_VADEM_REV); break; } /* * Check for RICOH RF5C[23]96 PCMCIA Controller */ c = exca_getb(exca, EXCA_RICOH_ID); if (c == EXCA_RID_396) { exca->chipset = EXCA_RF5C396; break; } else if (c == EXCA_RID_296) { exca->chipset = EXCA_RF5C296; break; } /* * Check for Cirrus logic chips. */ exca_putb(exca, EXCA_CIRRUS_CHIP_INFO, 0); c = exca_getb(exca, EXCA_CIRRUS_CHIP_INFO); if ((c & EXCA_CIRRUS_CHIP_INFO_CHIP_ID) == EXCA_CIRRUS_CHIP_INFO_CHIP_ID) { c = exca_getb(exca, EXCA_CIRRUS_CHIP_INFO); if ((c & EXCA_CIRRUS_CHIP_INFO_CHIP_ID) == 0) { if (c & EXCA_CIRRUS_CHIP_INFO_SLOTS) exca->chipset = EXCA_PD6722; else exca->chipset = EXCA_PD6710; break; } } break; case EXCA_IDENT_REV_I82365SLDF: /* * Intel i82365sl-DF step or maybe a vlsi 82c146 * we detected the vlsi case earlier, so if the controller * isn't set, we know it is a i82365sl step D. * XXXX Except we didn't -- this is a regression but VLSI * controllers are super hard to find these days for testing. */ exca->chipset = EXCA_I82365SL_DF; break; case EXCA_IDENT_REV_IBM1: case EXCA_IDENT_REV_IBM2: exca->chipset = EXCA_IBM; break; case EXCA_IDENT_REV_IBM_KING: exca->chipset = EXCA_IBM_KING; break; default: return (0); } return (1); } /* * Probe the expected slots. We maybe should set the ID for each of these * slots too while we're at it. But maybe that belongs to a separate * function. * * The caller must guarantee that at least EXCA_NSLOTS are present in exca. 
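 *
 * A caller might use it roughly as follows (sketch; variable names are
 * illustrative):
 *
 *	struct exca_softc slots[EXCA_NSLOTS];
 *
 *	if (exca_probe_slots(dev, slots, iot, ioh) == 0)
 *		... at least one valid socket was found ...
 */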
*/ int exca_probe_slots(device_t dev, struct exca_softc *exca, bus_space_tag_t iot, bus_space_handle_t ioh) { int err; int i; err = ENXIO; for (i = 0; i < EXCA_NSLOTS; i++) { exca_init(&exca[i], dev, iot, ioh, i * EXCA_SOCKET_SIZE); exca->getb = exca_io_getb; exca->putb = exca_io_putb; if (exca_valid_slot(&exca[i])) { device_set_desc(dev, chip_names[exca[i].chipset]); err = 0; } } return (err); } void exca_insert(struct exca_softc *exca) { if (device_is_attached(exca->pccarddev)) { if (CARD_ATTACH_CARD(exca->pccarddev) != 0) device_printf(exca->dev, "PC Card card activation failed\n"); } else { device_printf(exca->dev, "PC Card inserted, but no pccard bus.\n"); } } void exca_removal(struct exca_softc *exca) { if (device_is_attached(exca->pccarddev)) CARD_DETACH_CARD(exca->pccarddev); } int -exca_activate_resource(struct exca_softc *exca, device_t child, int type, - int rid, struct resource *res) +exca_activate_resource(struct exca_softc *exca, device_t child, + struct resource *res) { int err; if (rman_get_flags(res) & RF_ACTIVE) return (0); err = BUS_ACTIVATE_RESOURCE(device_get_parent(exca->dev), child, - type, rid, res); + res); if (err) return (err); - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: err = exca_io_map(exca, PCCARD_WIDTH_AUTO, res); break; case SYS_RES_MEMORY: err = exca_mem_map(exca, 0, res); break; } if (err) BUS_DEACTIVATE_RESOURCE(device_get_parent(exca->dev), child, - type, rid, res); + res); return (err); } int -exca_deactivate_resource(struct exca_softc *exca, device_t child, int type, - int rid, struct resource *res) +exca_deactivate_resource(struct exca_softc *exca, device_t child, + struct resource *res) { if (rman_get_flags(res) & RF_ACTIVE) { /* if activated */ - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: if (exca_io_unmap_res(exca, res)) return (ENOENT); break; case SYS_RES_MEMORY: if (exca_mem_unmap_res(exca, res)) return (ENOENT); break; } } return (BUS_DEACTIVATE_RESOURCE(device_get_parent(exca->dev), child, - type, rid, res)); + res)); } #if 0 static struct resource * exca_alloc_resource(struct exca_softc *sc, device_t child, int type, int *rid, u_long start, u_long end, u_long count, uint flags) { struct resource *res = NULL; int tmp; switch (type) { case SYS_RES_MEMORY: if (start < cbb_start_mem) start = cbb_start_mem; if (end < start) end = start; flags = (flags & ~RF_ALIGNMENT_MASK) | rman_make_alignment_flags(CBB_MEMALIGN); break; case SYS_RES_IOPORT: if (start < cbb_start_16_io) start = cbb_start_16_io; if (end < start) end = start; break; case SYS_RES_IRQ: tmp = rman_get_start(sc->irq_res); if (start > tmp || end < tmp || count != 1) { device_printf(child, "requested interrupt %ld-%ld," "count = %ld not supported by cbb\n", start, end, count); return (NULL); } flags |= RF_SHAREABLE; start = end = rman_get_start(sc->irq_res); break; } res = BUS_ALLOC_RESOURCE(up, child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) return (NULL); cbb_insert_res(sc, res, type, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { bus_release_resource(child, type, *rid, res); return (NULL); } } return (res); } static int exca_release_resource(struct exca_softc *sc, device_t child, int type, int rid, struct resource *res) { int error; if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error != 0) return (error); } cbb_remove_res(sc, res); return (BUS_RELEASE_RESOURCE(device_get_parent(brdev), child, type, rid, res)); 
} #endif static int exca_modevent(module_t mod, int cmd, void *arg) { return 0; } DEV_MODULE(exca, exca_modevent, NULL); MODULE_VERSION(exca, 1); diff --git a/sys/dev/exca/excavar.h b/sys/dev/exca/excavar.h index b301ab5ab26b..07cf2701c4a4 100644 --- a/sys/dev/exca/excavar.h +++ b/sys/dev/exca/excavar.h @@ -1,166 +1,166 @@ /*- * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause * * Copyright (c) 2002 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This software may be derived from NetBSD i82365.c and other files with * the following copyright: * * Copyright (c) 1997 Marc Horowitz. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Marc Horowitz. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _SYS_DEV_EXCA_EXCAVAR_H #define _SYS_DEV_EXCA_EXCAVAR_H /* * Structure to manage the ExCA part of the chip. 
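 *
 * All register access funnels through the getb/putb hooks declared below,
 * so one driver body serves bridges whose ExCA registers are memory-mapped
 * as well as those reached through the legacy index/data port pair.  A
 * sketch of the two access styles; the bodies are illustrative assumptions,
 * the real accessors are exca_mem_getb()/exca_io_getb() in exca.c:
 */
#if 0	/* illustrative only */
static uint8_t
sketch_mem_getb(struct exca_softc *sc, int reg)
{
	/* Registers appear directly inside the memory window. */
	return (bus_space_read_1(sc->bst, sc->bsh, sc->offset + reg));
}

static uint8_t
sketch_io_getb(struct exca_softc *sc, int reg)
{
	/* Select the register via the index port, then read the data port. */
	bus_space_write_1(sc->bst, sc->bsh, EXCA_REG_INDEX, sc->offset + reg);
	return (bus_space_read_1(sc->bst, sc->bsh, EXCA_REG_DATA));
}
#endif
/*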
*/ struct exca_softc; typedef uint8_t (exca_getb_fn)(struct exca_softc *, int); typedef void (exca_putb_fn)(struct exca_softc *, int, uint8_t); struct exca_softc { device_t dev; int memalloc; struct pccard_mem_handle mem[EXCA_MEM_WINS]; int ioalloc; struct pccard_io_handle io[EXCA_IO_WINS]; bus_space_tag_t bst; bus_space_handle_t bsh; uint32_t flags; #define EXCA_SOCKET_PRESENT 0x00000001 #define EXCA_HAS_MEMREG_WIN 0x00000002 #define EXCA_CARD_OK 0x00000004 #define EXCA_EVENT 0x80000000 uint32_t offset; int chipset; #define EXCA_CARDBUS 0 #define EXCA_I82365 1 /* Intel i82365SL-A/B or clone */ #define EXCA_I82365SL_DF 2 /* Intel i82365sl-DF step */ #define EXCA_VLSI 3 /* VLSI chip */ #define EXCA_PD6710 4 /* Cirrus logic PD6710 */ #define EXCA_PD6722 5 /* Cirrus logic PD6722 */ #define EXCA_PD6729 6 /* Cirrus Logic PD6729 */ #define EXCA_VG365 7 /* Vadem 365 */ #define EXCA_VG465 8 /* Vadem 465 */ #define EXCA_VG468 9 /* Vadem 468 */ #define EXCA_VG469 10 /* Vadem 469 */ #define EXCA_RF5C296 11 /* Ricoh RF5C296 */ #define EXCA_RF5C396 12 /* Ricoh RF5C396 */ #define EXCA_IBM 13 /* IBM clone */ #define EXCA_IBM_KING 14 /* IBM KING PCMCIA Controller */ #define EXCA_BOGUS -1 /* Invalid/not present/etc */ exca_getb_fn *getb; exca_putb_fn *putb; device_t pccarddev; uint32_t status; /* status, hw dependent */ }; void exca_init(struct exca_softc *sc, device_t dev, bus_space_tag_t, bus_space_handle_t, uint32_t); void exca_insert(struct exca_softc *sc); int exca_io_map(struct exca_softc *sc, int width, struct resource *r); int exca_io_unmap_res(struct exca_softc *sc, struct resource *res); int exca_is_pcic(struct exca_softc *sc); int exca_mem_map(struct exca_softc *sc, int kind, struct resource *res); int exca_mem_set_flags(struct exca_softc *sc, struct resource *res, uint32_t flags); int exca_mem_set_offset(struct exca_softc *sc, struct resource *res, uint32_t cardaddr, uint32_t *deltap); int exca_mem_unmap_res(struct exca_softc *sc, struct resource *res); int exca_probe_slots(device_t dev, struct exca_softc *exca, bus_space_tag_t iot, bus_space_handle_t ioh); void exca_removal(struct exca_softc *); void exca_reset(struct exca_softc *, device_t child); /* bus/device interfaces */ -int exca_activate_resource(struct exca_softc *exca, device_t child, int type, - int rid, struct resource *res); -int exca_deactivate_resource(struct exca_softc *exca, device_t child, int type, - int rid, struct resource *res); +int exca_activate_resource(struct exca_softc *exca, device_t child, + struct resource *res); +int exca_deactivate_resource(struct exca_softc *exca, device_t child, + struct resource *res); static __inline uint8_t exca_getb(struct exca_softc *sc, int reg) { return (sc->getb(sc, reg)); } static __inline void exca_putb(struct exca_softc *sc, int reg, uint8_t val) { sc->putb(sc, reg, val); } static __inline void exca_setb(struct exca_softc *sc, int reg, uint8_t mask) { exca_putb(sc, reg, exca_getb(sc, reg) | mask); } static __inline void exca_clrb(struct exca_softc *sc, int reg, uint8_t mask) { exca_putb(sc, reg, exca_getb(sc, reg) & ~mask); } enum { EXCA_IVAR_SLOT = 100, }; #define EXCA_ACCESSOR(A, B, T) \ static inline int \ exca_get_ ## A(device_t dev, T *t) \ { \ return BUS_READ_IVAR(device_get_parent(dev), dev, \ EXCA_IVAR_ ## B, (uintptr_t *) t); \ } EXCA_ACCESSOR(slot, SLOT, uint32_t) #endif /* !_SYS_DEV_EXCA_EXCAVAR_H */ diff --git a/sys/dev/fdt/simplebus.c b/sys/dev/fdt/simplebus.c index 2bd1a1402797..8069d3af69cf 100644 --- a/sys/dev/fdt/simplebus.c +++ b/sys/dev/fdt/simplebus.c @@ -1,569 
+1,545 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include /* * Bus interface. */ static int simplebus_probe(device_t dev); static struct resource *simplebus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int simplebus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); -static int simplebus_activate_resource(device_t bus, - device_t child, int type, int rid, struct resource *r); -static int simplebus_deactivate_resource(device_t bus, - device_t child, int type, int rid, struct resource *r); static void simplebus_probe_nomatch(device_t bus, device_t child); static int simplebus_print_child(device_t bus, device_t child); static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit); static struct resource_list *simplebus_get_resource_list(device_t bus, device_t child); static ssize_t simplebus_get_property(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); /* * ofw_bus interface */ static const struct ofw_bus_devinfo *simplebus_get_devinfo(device_t bus, device_t child); /* * Driver methods. 
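 *
 * With the type and rid arguments gone from the activation KPI there is
 * nothing left for simplebus to fix up, so the method table below points
 * straight at the bus_generic_* helpers, which simply forward the request
 * to the parent bus.  Roughly, as a behavioral sketch rather than the
 * helpers' exact source:
 */
#if 0	/* illustrative only */
static int
sketch_generic_activate_resource(device_t bus, device_t child,
    struct resource *r)
{
	/* Punt to our own parent; the bus that owns the rman does the work. */
	if (device_get_parent(bus) != NULL)
		return (BUS_ACTIVATE_RESOURCE(device_get_parent(bus),
		    child, r));
	return (EINVAL);
}
#endif
/*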
*/ static device_method_t simplebus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, simplebus_probe), DEVMETHOD(device_attach, simplebus_attach), DEVMETHOD(device_detach, simplebus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, simplebus_add_child), DEVMETHOD(bus_print_child, simplebus_print_child), DEVMETHOD(bus_probe_nomatch, simplebus_probe_nomatch), DEVMETHOD(bus_read_ivar, bus_generic_read_ivar), DEVMETHOD(bus_write_ivar, bus_generic_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, simplebus_alloc_resource), DEVMETHOD(bus_release_resource, simplebus_release_resource), - DEVMETHOD(bus_activate_resource, simplebus_activate_resource), - DEVMETHOD(bus_deactivate_resource, simplebus_deactivate_resource), + DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), + DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_map_resource, bus_generic_map_resource), DEVMETHOD(bus_unmap_resource, bus_generic_unmap_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_get_resource_list, simplebus_get_resource_list), DEVMETHOD(bus_get_property, simplebus_get_property), DEVMETHOD(bus_get_device_path, ofw_bus_gen_get_device_path), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, simplebus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_0(simplebus, simplebus_driver, simplebus_methods, sizeof(struct simplebus_softc)); EARLY_DRIVER_MODULE(simplebus, ofwbus, simplebus_driver, 0, 0, BUS_PASS_BUS); EARLY_DRIVER_MODULE(simplebus, simplebus, simplebus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); static int simplebus_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); /* * XXX We should attach only to a pure 'compatible = "simple-bus"', * without any other compatible string. * For now, filter only known cases: * "syscon", "simple-bus"; is handled by fdt/syscon driver * "simple-mfd", "simple-bus"; is handled by fdt/simple-mfd driver */ if (ofw_bus_is_compatible(dev, "syscon") || ofw_bus_is_compatible(dev, "simple-mfd")) return (ENXIO); /* * FDT data puts a "simple-bus" compatible string on many things that * have children but aren't really buses in our world. Without a * ranges property we will fail to attach, so just fail to probe too.
*/ if (!(ofw_bus_is_compatible(dev, "simple-bus") && ofw_bus_has_prop(dev, "ranges")) && (ofw_bus_get_type(dev) == NULL || strcmp(ofw_bus_get_type(dev), "soc") != 0)) return (ENXIO); device_set_desc(dev, "Flattened device tree simple bus"); return (BUS_PROBE_GENERIC); } int simplebus_attach_impl(device_t dev) { struct simplebus_softc *sc; phandle_t node; sc = device_get_softc(dev); simplebus_init(dev, 0); if ((sc->flags & SB_FLAG_NO_RANGES) == 0 && simplebus_fill_ranges(sc->node, sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } /* * In principle, simplebus could have an interrupt map, but ignore that * for now */ for (node = OF_child(sc->node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (0); } int simplebus_attach(device_t dev) { int rv; rv = simplebus_attach_impl(dev); if (rv != 0) return (rv); return (bus_generic_attach(dev)); } int simplebus_detach(device_t dev) { struct simplebus_softc *sc; sc = device_get_softc(dev); if (sc->ranges != NULL) free(sc->ranges, M_DEVBUF); return (bus_generic_detach(dev)); } void simplebus_init(device_t dev, phandle_t node) { struct simplebus_softc *sc; sc = device_get_softc(dev); if (node == 0) node = ofw_bus_get_node(dev); sc->dev = dev; sc->node = node; /* * Some important numbers */ sc->acells = 2; OF_getencprop(node, "#address-cells", &sc->acells, sizeof(sc->acells)); sc->scells = 1; OF_getencprop(node, "#size-cells", &sc->scells, sizeof(sc->scells)); } int simplebus_fill_ranges(phandle_t node, struct simplebus_softc *sc) { int host_address_cells; cell_t *base_ranges; ssize_t nbase_ranges; int err; int i, j, k; err = OF_searchencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); if (err <= 0) return (-1); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges < 0) return (-1); sc->nranges = nbase_ranges / sizeof(cell_t) / (sc->acells + host_address_cells + sc->scells); if (sc->nranges == 0) return (0); sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), M_DEVBUF, M_WAITOK); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { sc->ranges[i].bus = 0; for (k = 0; k < sc->acells; k++) { sc->ranges[i].bus <<= 32; sc->ranges[i].bus |= base_ranges[j++]; } sc->ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { sc->ranges[i].host <<= 32; sc->ranges[i].host |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < sc->scells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (sc->nranges); } struct simplebus_devinfo * simplebus_setup_dinfo(device_t dev, phandle_t node, struct simplebus_devinfo *di) { struct simplebus_softc *sc; struct simplebus_devinfo *ndi; sc = device_get_softc(dev); if (di == NULL) ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); else ndi = di; if (ofw_bus_gen_setup_devinfo(&ndi->obdinfo, node) != 0) { if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } resource_list_init(&ndi->rl); ofw_bus_reg_to_rl(dev, node, sc->acells, sc->scells, &ndi->rl); ofw_bus_intr_to_rl(dev, node, &ndi->rl, NULL); return (ndi); } device_t simplebus_add_device(device_t dev, phandle_t node, u_int order, const char *name, int unit, struct simplebus_devinfo *di) { struct simplebus_devinfo *ndi; device_t cdev; if ((ndi = simplebus_setup_dinfo(dev, node, di)) == NULL) return (NULL); cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == 
NULL) { device_printf(dev, "<%s>: device_add_child failed\n", ndi->obdinfo.obd_name); resource_list_free(&ndi->rl); ofw_bus_gen_destroy_devinfo(&ndi->obdinfo); if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } device_set_ivars(cdev, ndi); return(cdev); } static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t cdev; struct simplebus_devinfo *ndi; cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == NULL) return (NULL); ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); ndi->obdinfo.obd_node = -1; resource_list_init(&ndi->rl); device_set_ivars(cdev, ndi); return (cdev); } static const struct ofw_bus_devinfo * simplebus_get_devinfo(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->obdinfo); } static struct resource_list * simplebus_get_resource_list(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->rl); } static ssize_t simplebus_get_property(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { phandle_t node, xref; ssize_t ret, i; uint32_t *buffer; uint64_t val; switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: case DEVICE_PROP_HANDLE: break; default: return (-1); } node = ofw_bus_get_node(child); if (propvalue == NULL || size == 0) return (OF_getproplen(node, propname)); /* * Integer values are stored in BE format. * If caller declared that the underlying property type is uint32_t * we need to do the conversion to match host endianness. */ if (type == DEVICE_PROP_UINT32) return (OF_getencprop(node, propname, propvalue, size)); /* * uint64_t also requires endianness handling. * In FDT every 8 byte value is stored using two uint32_t variables * in BE format. Now, since the upper bits are stored as the first * of the pair, both halves require swapping. */ if (type == DEVICE_PROP_UINT64) { ret = OF_getencprop(node, propname, propvalue, size); if (ret <= 0) { return (ret); } buffer = (uint32_t *)propvalue; for (i = 0; i < size / 4; i += 2) { val = (uint64_t)buffer[i] << 32 | buffer[i + 1]; ((uint64_t *)buffer)[i / 2] = val; } return (ret); } if (type == DEVICE_PROP_HANDLE) { if (size < sizeof(node)) return (-1); ret = OF_getencprop(node, propname, &xref, sizeof(xref)); if (ret <= 0) return (ret); node = OF_node_from_xref(xref); if (propvalue != NULL) *(uint32_t *)propvalue = node; return (ret); } return (OF_getprop(node, propname, propvalue, size)); } static struct resource * simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct simplebus_softc *sc; struct simplebus_devinfo *di; struct resource_list_entry *rle; int j; sc = device_get_softc(bus); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
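 * (The list was populated from the node's "reg" and "interrupts"
 * properties by ofw_bus_reg_to_rl() and ofw_bus_intr_to_rl() in
 * simplebus_setup_dinfo() above.)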
*/ if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); rle = resource_list_find(&di->rl, type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; end = rle->end; count = rle->count; } if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#jx-%#jx\n", start, end); return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int simplebus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; return (bus_generic_release_resource(bus, child, type, rid, r)); } -static int -simplebus_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) -{ - - if (type == SYS_RES_IOPORT) - type = SYS_RES_MEMORY; - return (bus_generic_activate_resource(bus, child, type, rid, r)); -} - -static int -simplebus_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) -{ - - if (type == SYS_RES_IOPORT) - type = SYS_RES_MEMORY; - return (bus_generic_deactivate_resource(bus, child, type, rid, r)); -} - static int simplebus_print_res(struct simplebus_devinfo *di) { int rv; if (di == NULL) return (0); rv = 0; rv += resource_list_print_type(&di->rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(&di->rl, "irq", SYS_RES_IRQ, "%jd"); return (rv); } static void simplebus_probe_nomatch(device_t bus, device_t child) { const char *name, *type, *compat; if (!bootverbose) return; compat = ofw_bus_get_compat(child); if (compat == NULL) return; name = ofw_bus_get_name(child); type = ofw_bus_get_type(child); device_printf(bus, "<%s>", name != NULL ? name : "unknown"); simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) printf(" disabled"); if (type) printf(" type %s", type); printf(" compat %s (no driver attached)\n", compat); } static int simplebus_print_child(device_t bus, device_t child) { int rv; rv = bus_print_child_header(bus, child); rv += simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) rv += printf(" disabled"); rv += bus_print_child_footer(bus, child); return (rv); } diff --git a/sys/dev/hyperv/pcib/vmbus_pcib.c b/sys/dev/hyperv/pcib/vmbus_pcib.c index 3d3041ee76b3..fd8a7feae984 100644 --- a/sys/dev/hyperv/pcib/vmbus_pcib.c +++ b/sys/dev/hyperv/pcib/vmbus_pcib.c @@ -1,2049 +1,2047 @@ /*- * Copyright (c) 2016-2017 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #ifdef NEW_PCIB #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #if defined(__i386__) || defined(__amd64__) #include #include #include #endif #if defined(__aarch64__) #include #include #include #include #endif #include #include #include #include #include "vmbus_if.h" struct completion { unsigned int done; struct mtx lock; }; static void init_completion(struct completion *c) { memset(c, 0, sizeof(*c)); mtx_init(&c->lock, "hvcmpl", NULL, MTX_DEF); c->done = 0; } static void reinit_completion(struct completion *c) { c->done = 0; } static void free_completion(struct completion *c) { mtx_destroy(&c->lock); } static void complete(struct completion *c) { mtx_lock(&c->lock); c->done++; mtx_unlock(&c->lock); wakeup(c); } static void wait_for_completion(struct completion *c) { mtx_lock(&c->lock); while (c->done == 0) mtx_sleep(c, &c->lock, 0, "hvwfc", 0); c->done--; mtx_unlock(&c->lock); } /* * Return: 0 if completed, a non-zero value if timed out. 
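 *
 * The shim above mirrors the Linux completion API this driver was ported
 * against.  A sketch of the intended producer/consumer usage; the names
 * are illustrative:
 */
#if 0	/* illustrative only */
static struct completion reply_done;

static void
sketch_reply_handler(void)
{
	complete(&reply_done);			/* producer: wake any waiter */
}

static void
sketch_request(void)
{
	init_completion(&reply_done);
	/* ... send the request to the host ... */
	wait_for_completion(&reply_done);	/* consumer: sleep until done */
	free_completion(&reply_done);
}
#endif
/*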
*/ static int wait_for_completion_timeout(struct completion *c, int timeout) { int ret; mtx_lock(&c->lock); if (c->done == 0) mtx_sleep(c, &c->lock, 0, "hvwfc", timeout); if (c->done > 0) { c->done--; ret = 0; } else { ret = 1; } mtx_unlock(&c->lock); return (ret); } #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define PCI_MAKE_VERSION(major, minor) ((uint32_t)(((major) << 16) | (minor))) enum pci_protocol_version_t { PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4), }; static enum pci_protocol_version_t pci_protocol_versions[] = { PCI_PROTOCOL_VERSION_1_4, PCI_PROTOCOL_VERSION_1_1, }; #define PCI_CONFIG_MMIO_LENGTH 0x2000 #define CFG_PAGE_OFFSET 0x1000 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) /* * Message Types */ enum pci_message_type { /* * Version 1.1 */ PCI_MESSAGE_BASE = 0x42490000, PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0, PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1, PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4, PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5, PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6, PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7, PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8, PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9, PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA, PCI_EJECT = PCI_MESSAGE_BASE + 0xB, PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC, PCI_REENABLE = PCI_MESSAGE_BASE + 0xD, PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE, PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF, PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10, PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11, PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12, PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19, PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A, PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B, PCI_MESSAGE_MAXIMUM }; #define STATUS_REVISION_MISMATCH 0xC0000059 /* * Structures defining the virtual PCI Express protocol. */ union pci_version { struct { uint16_t minor_version; uint16_t major_version; } parts; uint32_t version; } __packed; /* * This representation is the one used in Windows, which is * what is expected when sending this back and forth with * the Hyper-V parent partition. 
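 *
 * For example, slot 4 function 1 packs as val = (1 << 5) | 4 = 0x24; the
 * devfn_to_wslot()/wslot_to_devfn() helpers below convert between this
 * layout and the usual (slot << 3) | func devfn encoding.
 */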
*/ union win_slot_encoding { struct { uint32_t slot:5; uint32_t func:3; uint32_t reserved:24; } bits; uint32_t val; } __packed; struct pci_func_desc { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ } __packed; struct pci_func_desc2 { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ uint32_t flags; uint16_t virtual_numa_node; uint16_t reserved; } __packed; struct hv_msi_desc { uint8_t vector; uint8_t delivery_mode; uint16_t vector_count; uint32_t reserved; uint64_t cpu_mask; } __packed; struct hv_msi_desc3 { uint32_t vector; uint8_t delivery_mode; uint8_t reserved; uint16_t vector_count; uint16_t processor_count; uint16_t processor_array[32]; } __packed; struct tran_int_desc { uint16_t reserved; uint16_t vector_count; uint32_t data; uint64_t address; } __packed; struct pci_message { uint32_t type; } __packed; struct pci_child_message { struct pci_message message_type; union win_slot_encoding wslot; } __packed; struct pci_incoming_message { struct vmbus_chanpkt_hdr hdr; struct pci_message message_type; } __packed; struct pci_response { struct vmbus_chanpkt_hdr hdr; int32_t status; /* negative values are failures */ } __packed; struct pci_packet { void (*completion_func)(void *context, struct pci_response *resp, int resp_packet_size); void *compl_ctxt; struct pci_message message[0]; }; /* * Specific message types supporting the PCI protocol. */ struct pci_version_request { struct pci_message message_type; uint32_t protocol_version; uint32_t reservedz:31; } __packed; struct pci_bus_d0_entry { struct pci_message message_type; uint32_t reserved; uint64_t mmio_base; } __packed; struct pci_bus_relations { struct pci_incoming_message incoming; uint32_t device_count; struct pci_func_desc func[0]; } __packed; struct pci_bus_relations2 { struct pci_incoming_message incoming; uint32_t device_count; struct pci_func_desc2 func[0]; } __packed; #define MAX_NUM_BARS (PCIR_MAX_BAR_0 + 1) struct pci_q_res_req_response { struct vmbus_chanpkt_hdr hdr; int32_t status; /* negative values are failures */ uint32_t probed_bar[MAX_NUM_BARS]; } __packed; struct pci_resources_assigned { struct pci_message message_type; union win_slot_encoding wslot; uint8_t memory_range[0x14][MAX_NUM_BARS]; /* unused here */ uint32_t msi_descriptors; uint32_t reserved[4]; } __packed; struct pci_resources_assigned2 { struct pci_message message_type; union win_slot_encoding wslot; uint8_t memory_range[0x14][6]; /* not used here */ uint32_t msi_descriptor_count; uint8_t reserved[70]; } __packed; struct pci_create_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc int_desc; } __packed; struct pci_create_interrupt3 { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc3 int_desc; } __packed; struct pci_create_int_response { struct pci_response response; uint32_t reserved; struct tran_int_desc int_desc; } __packed; struct pci_delete_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct tran_int_desc int_desc; } __packed; struct pci_dev_incoming { struct pci_incoming_message incoming; union win_slot_encoding wslot; } __packed; struct pci_eject_response { struct pci_message message_type; union 
win_slot_encoding wslot; uint32_t status; } __packed; /* * Driver specific state. */ enum hv_pcibus_state { hv_pcibus_init = 0, hv_pcibus_installed, }; struct hv_pcibus { device_t pcib; device_t pci_bus; struct vmbus_pcib_softc *sc; uint16_t pci_domain; enum hv_pcibus_state state; struct resource *cfg_res; struct completion query_completion, *query_comp; struct mtx config_lock; /* Avoid two threads writing index page */ struct mtx device_list_lock; /* Protect lists below */ uint32_t protocol_version; TAILQ_HEAD(, hv_pci_dev) children; TAILQ_HEAD(, hv_dr_state) dr_list; volatile int detaching; }; struct hv_pcidev_desc { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ uint32_t flags; uint16_t virtual_numa_node; } __packed; struct hv_pci_dev { TAILQ_ENTRY(hv_pci_dev) link; struct hv_pcidev_desc desc; bool reported_missing; struct hv_pcibus *hbus; struct task eject_task; TAILQ_HEAD(, hv_irq_desc) irq_desc_list; /* * What would be observed if one wrote 0xFFFFFFFF to a BAR and then * read it back, for each of the BAR offsets within config space. */ uint32_t probed_bar[MAX_NUM_BARS]; }; /* * Tracks "Device Relations" messages from the host, which must be both * processed in order. */ struct hv_dr_work { struct task task; struct hv_pcibus *bus; }; struct hv_dr_state { TAILQ_ENTRY(hv_dr_state) link; uint32_t device_count; struct hv_pcidev_desc func[0]; }; struct hv_irq_desc { TAILQ_ENTRY(hv_irq_desc) link; struct tran_int_desc desc; int irq; }; #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) static uint32_t devfn_to_wslot(unsigned int devfn) { union win_slot_encoding wslot; wslot.val = 0; wslot.bits.slot = PCI_SLOT(devfn); wslot.bits.func = PCI_FUNC(devfn); return (wslot.val); } static unsigned int wslot_to_devfn(uint32_t wslot) { union win_slot_encoding encoding; unsigned int slot; unsigned int func; encoding.val = wslot; slot = encoding.bits.slot; func = encoding.bits.func; return (PCI_DEVFN(slot, func)); } struct vmbus_pcib_softc { struct vmbus_channel *chan; void *rx_buf; struct taskqueue *taskq; struct hv_pcibus *hbus; }; /* {44C4F61D-4444-4400-9D52-802E27EDE19F} */ static const struct hyperv_guid g_pass_through_dev_type = { .hv_guid = {0x1D, 0xF6, 0xC4, 0x44, 0x44, 0x44, 0x00, 0x44, 0x9D, 0x52, 0x80, 0x2E, 0x27, 0xED, 0xE1, 0x9F} }; struct hv_pci_compl { struct completion host_event; int32_t completion_status; }; struct q_res_req_compl { struct completion host_event; struct hv_pci_dev *hpdev; }; struct compose_comp_ctxt { struct hv_pci_compl comp_pkt; struct tran_int_desc int_desc; }; /* * It is possible the device is revoked during initialization. * Check if this happens during wait. * Return: 0 if response arrived, ENODEV if device revoked. 
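 *
 * Every host request in this driver follows the same shape: point a
 * pci_packet's completion_func at a handler, send the message with
 * VMBUS_CHANPKT_FLAG_RC so the host returns a completion packet, then
 * block here (or in wait_for_completion()) until the channel callback
 * runs the handler.
 */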
*/ static int wait_for_response(struct hv_pcibus *hbus, struct completion *c) { do { if (vmbus_chan_is_revoked(hbus->sc->chan)) { device_printf(hbus->pcib, "The device is revoked.\n"); return (ENODEV); } } while (wait_for_completion_timeout(c, hz /10) != 0); return 0; } static void hv_pci_generic_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct hv_pci_compl *comp_pkt = context; if (resp_packet_size >= sizeof(struct pci_response)) comp_pkt->completion_status = resp->status; else comp_pkt->completion_status = -1; complete(&comp_pkt->host_event); } static void q_resource_requirements(void *context, struct pci_response *resp, int resp_packet_size) { struct q_res_req_compl *completion = context; struct pci_q_res_req_response *q_res_req = (struct pci_q_res_req_response *)resp; int i; if (resp->status < 0) { printf("vmbus_pcib: failed to query resource requirements\n"); } else { for (i = 0; i < MAX_NUM_BARS; i++) completion->hpdev->probed_bar[i] = q_res_req->probed_bar[i]; } complete(&completion->host_event); } static void hv_pci_compose_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct compose_comp_ctxt *comp_pkt = context; struct pci_create_int_response *int_resp = (struct pci_create_int_response *)resp; comp_pkt->comp_pkt.completion_status = resp->status; comp_pkt->int_desc = int_resp->int_desc; complete(&comp_pkt->comp_pkt.host_event); } static void hv_int_desc_free(struct hv_pci_dev *hpdev, struct hv_irq_desc *hid) { struct pci_delete_interrupt *int_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE; int_pkt->wslot.val = hpdev->desc.wslot.val; int_pkt->int_desc = hid->desc; vmbus_chan_send(hpdev->hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, int_pkt, sizeof(*int_pkt), 0); free(hid, M_DEVBUF); } static void hv_pci_delete_device(struct hv_pci_dev *hpdev) { struct hv_pcibus *hbus = hpdev->hbus; struct hv_irq_desc *hid, *tmp_hid; device_t pci_dev; int devfn; devfn = wslot_to_devfn(hpdev->desc.wslot.val); bus_topo_lock(); pci_dev = pci_find_dbsf(hbus->pci_domain, 0, PCI_SLOT(devfn), PCI_FUNC(devfn)); if (pci_dev) device_delete_child(hbus->pci_bus, pci_dev); bus_topo_unlock(); mtx_lock(&hbus->device_list_lock); TAILQ_REMOVE(&hbus->children, hpdev, link); mtx_unlock(&hbus->device_list_lock); TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) hv_int_desc_free(hpdev, hid); free(hpdev, M_DEVBUF); } static struct hv_pci_dev * new_pcichild_device(struct hv_pcibus *hbus, struct hv_pcidev_desc *desc) { struct hv_pci_dev *hpdev; struct pci_child_message *res_req; struct q_res_req_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_child_message)]; } ctxt; int ret; hpdev = malloc(sizeof(*hpdev), M_DEVBUF, M_WAITOK | M_ZERO); hpdev->hbus = hbus; TAILQ_INIT(&hpdev->irq_desc_list); init_completion(&comp_pkt.host_event); comp_pkt.hpdev = hpdev; ctxt.pkt.compl_ctxt = &comp_pkt; ctxt.pkt.completion_func = q_resource_requirements; res_req = (struct pci_child_message *)&ctxt.pkt.message; res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; res_req->wslot.val = desc->wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, res_req, sizeof(*res_req), (uint64_t)(uintptr_t)&ctxt.pkt); if (ret) goto err; if (wait_for_response(hbus, &comp_pkt.host_event)) goto err; 
free_completion(&comp_pkt.host_event); hpdev->desc = *desc; mtx_lock(&hbus->device_list_lock); if (TAILQ_EMPTY(&hbus->children)) hbus->pci_domain = desc->ser & 0xFFFF; TAILQ_INSERT_TAIL(&hbus->children, hpdev, link); mtx_unlock(&hbus->device_list_lock); return (hpdev); err: free_completion(&comp_pkt.host_event); free(hpdev, M_DEVBUF); return (NULL); } static int pci_rescan(device_t dev) { return (BUS_RESCAN(dev)); } static void pci_devices_present_work(void *arg, int pending __unused) { struct hv_dr_work *dr_wrk = arg; struct hv_dr_state *dr = NULL; struct hv_pcibus *hbus; uint32_t child_no; bool found; struct hv_pcidev_desc *new_desc; struct hv_pci_dev *hpdev, *tmp_hpdev; struct completion *query_comp; bool need_rescan = false; hbus = dr_wrk->bus; free(dr_wrk, M_DEVBUF); /* Pull this off the queue and process it if it was the last one. */ mtx_lock(&hbus->device_list_lock); while (!TAILQ_EMPTY(&hbus->dr_list)) { dr = TAILQ_FIRST(&hbus->dr_list); TAILQ_REMOVE(&hbus->dr_list, dr, link); /* Throw this away if the list still has stuff in it. */ if (!TAILQ_EMPTY(&hbus->dr_list)) { free(dr, M_DEVBUF); continue; } } mtx_unlock(&hbus->device_list_lock); if (!dr) return; /* First, mark all existing children as reported missing. */ mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) hpdev->reported_missing = true; mtx_unlock(&hbus->device_list_lock); /* Next, add back any reported devices. */ for (child_no = 0; child_no < dr->device_count; child_no++) { found = false; new_desc = &dr->func[child_no]; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { if ((hpdev->desc.wslot.val == new_desc->wslot.val) && (hpdev->desc.v_id == new_desc->v_id) && (hpdev->desc.d_id == new_desc->d_id) && (hpdev->desc.ser == new_desc->ser)) { hpdev->reported_missing = false; found = true; break; } } mtx_unlock(&hbus->device_list_lock); if (!found) { if (!need_rescan) need_rescan = true; hpdev = new_pcichild_device(hbus, new_desc); if (!hpdev) printf("vmbus_pcib: failed to add a child\n"); } } /* Remove missing device(s), if any */ TAILQ_FOREACH_SAFE(hpdev, &hbus->children, link, tmp_hpdev) { if (hpdev->reported_missing) hv_pci_delete_device(hpdev); } /* Rescan the bus to find any new device, if necessary. */ if (hbus->state == hv_pcibus_installed && need_rescan) pci_rescan(hbus->pci_bus); /* Wake up hv_pci_query_relations(), if it's waiting. 
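 * (hv_pci_query_relations() only sends the query; the answer arrives as a
 * PCI_BUS_RELATIONS message, so vmbus_pcib_attach() parks on query_comp
 * until this work item has folded the first device list in.)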
*/ query_comp = hbus->query_comp; if (query_comp) { hbus->query_comp = NULL; complete(query_comp); } free(dr, M_DEVBUF); } static struct hv_pci_dev * get_pcichild_wslot(struct hv_pcibus *hbus, uint32_t wslot) { struct hv_pci_dev *hpdev, *ret = NULL; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { if (hpdev->desc.wslot.val == wslot) { ret = hpdev; break; } } mtx_unlock(&hbus->device_list_lock); return (ret); } static void hv_pci_devices_present(struct hv_pcibus *hbus, struct pci_bus_relations *relations) { struct hv_dr_state *dr; struct hv_dr_work *dr_wrk; unsigned long dr_size; if (hbus->detaching && relations->device_count > 0) return; dr_size = offsetof(struct hv_dr_state, func) + (sizeof(struct pci_func_desc) * relations->device_count); dr = malloc(dr_size, M_DEVBUF, M_WAITOK | M_ZERO); dr->device_count = relations->device_count; if (dr->device_count != 0) memcpy(dr->func, relations->func, sizeof(struct hv_pcidev_desc) * dr->device_count); mtx_lock(&hbus->device_list_lock); TAILQ_INSERT_TAIL(&hbus->dr_list, dr, link); mtx_unlock(&hbus->device_list_lock); dr_wrk = malloc(sizeof(*dr_wrk), M_DEVBUF, M_WAITOK | M_ZERO); dr_wrk->bus = hbus; TASK_INIT(&dr_wrk->task, 0, pci_devices_present_work, dr_wrk); taskqueue_enqueue(hbus->sc->taskq, &dr_wrk->task); } static void hv_pci_devices_present2(struct hv_pcibus *hbus, struct pci_bus_relations2 *relations) { struct hv_dr_state *dr; struct hv_dr_work *dr_wrk; unsigned long dr_size; if (hbus->detaching && relations->device_count > 0) return; dr_size = offsetof(struct hv_dr_state, func) + (sizeof(struct pci_func_desc2) * relations->device_count); dr = malloc(dr_size, M_DEVBUF, M_WAITOK | M_ZERO); dr->device_count = relations->device_count; if (dr->device_count != 0) memcpy(dr->func, relations->func, sizeof(struct pci_func_desc2) * dr->device_count); mtx_lock(&hbus->device_list_lock); TAILQ_INSERT_TAIL(&hbus->dr_list, dr, link); mtx_unlock(&hbus->device_list_lock); dr_wrk = malloc(sizeof(*dr_wrk), M_DEVBUF, M_WAITOK | M_ZERO); dr_wrk->bus = hbus; TASK_INIT(&dr_wrk->task, 0, pci_devices_present_work, dr_wrk); taskqueue_enqueue(hbus->sc->taskq, &dr_wrk->task); } static void hv_eject_device_work(void *arg, int pending __unused) { struct hv_pci_dev *hpdev = arg; union win_slot_encoding wslot = hpdev->desc.wslot; struct hv_pcibus *hbus = hpdev->hbus; struct pci_eject_response *eject_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_eject_response)]; } ctxt; hv_pci_delete_device(hpdev); memset(&ctxt, 0, sizeof(ctxt)); eject_pkt = (struct pci_eject_response *)&ctxt.pkt.message; eject_pkt->message_type.type = PCI_EJECTION_COMPLETE; eject_pkt->wslot.val = wslot.val; vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, eject_pkt, sizeof(*eject_pkt), 0); } static void hv_pci_eject_device(struct hv_pci_dev *hpdev) { struct hv_pcibus *hbus = hpdev->hbus; struct taskqueue *taskq; if (hbus->detaching) return; /* * Push this task into the same taskqueue on which * vmbus_pcib_attach() runs, so we're sure this task can't run * concurrently with vmbus_pcib_attach(). 
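 * The work item finishes the handshake from task context as well: after
 * deleting the child, hv_eject_device_work() sends PCI_EJECTION_COMPLETE
 * back on the channel.
 */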
*/ TASK_INIT(&hpdev->eject_task, 0, hv_eject_device_work, hpdev); taskq = vmbus_chan_mgmt_tq(hbus->sc->chan); taskqueue_enqueue(taskq, &hpdev->eject_task); } #define PCIB_PACKET_SIZE 0x100 static void vmbus_pcib_on_channel_callback(struct vmbus_channel *chan, void *arg) { struct vmbus_pcib_softc *sc = arg; struct hv_pcibus *hbus = sc->hbus; void *buffer; int bufferlen = PCIB_PACKET_SIZE; struct pci_packet *comp_packet; struct pci_response *response; struct pci_incoming_message *new_msg; struct pci_bus_relations *bus_rel; struct pci_bus_relations2 *bus_rel2; struct pci_dev_incoming *dev_msg; struct hv_pci_dev *hpdev; buffer = sc->rx_buf; do { struct vmbus_chanpkt_hdr *pkt = buffer; uint32_t bytes_rxed; int ret; bytes_rxed = bufferlen; ret = vmbus_chan_recv_pkt(chan, pkt, &bytes_rxed); if (ret == ENOBUFS) { /* Handle large packet */ if (bufferlen > PCIB_PACKET_SIZE) { free(buffer, M_DEVBUF); buffer = NULL; } /* alloc new buffer */ buffer = malloc(bytes_rxed, M_DEVBUF, M_WAITOK | M_ZERO); bufferlen = bytes_rxed; continue; } if (ret != 0) { /* ignore EIO or EAGAIN */ break; } if (bytes_rxed <= sizeof(struct pci_response)) continue; switch (pkt->cph_type) { case VMBUS_CHANPKT_TYPE_COMP: comp_packet = (struct pci_packet *)(uintptr_t)pkt->cph_xactid; response = (struct pci_response *)pkt; comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_rxed); break; case VMBUS_CHANPKT_TYPE_INBAND: new_msg = (struct pci_incoming_message *)buffer; switch (new_msg->message_type.type) { case PCI_BUS_RELATIONS: bus_rel = (struct pci_bus_relations *)buffer; if (bus_rel->device_count == 0) break; if (bytes_rxed < offsetof(struct pci_bus_relations, func) + (sizeof(struct pci_func_desc) * (bus_rel->device_count))) break; hv_pci_devices_present(hbus, bus_rel); break; case PCI_BUS_RELATIONS2: bus_rel2 = (struct pci_bus_relations2 *)buffer; if (bus_rel2->device_count == 0) break; if (bytes_rxed < offsetof(struct pci_bus_relations2, func) + (sizeof(struct pci_func_desc2) * (bus_rel2->device_count))) break; hv_pci_devices_present2(hbus, bus_rel2); case PCI_EJECT: dev_msg = (struct pci_dev_incoming *)buffer; hpdev = get_pcichild_wslot(hbus, dev_msg->wslot.val); if (hpdev) hv_pci_eject_device(hpdev); break; default: printf("vmbus_pcib: Unknown msg type 0x%x\n", new_msg->message_type.type); break; } break; default: printf("vmbus_pcib: Unknown VMBus msg type %hd\n", pkt->cph_type); break; } } while (1); if (bufferlen > PCIB_PACKET_SIZE) free(buffer, M_DEVBUF); } static int hv_pci_protocol_negotiation(struct hv_pcibus *hbus, enum pci_protocol_version_t version[], int num_version) { struct pci_version_request *version_req; struct hv_pci_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_version_request)]; } ctxt; int ret; int i; init_completion(&comp_pkt.host_event); ctxt.pkt.completion_func = hv_pci_generic_compl; ctxt.pkt.compl_ctxt = &comp_pkt; version_req = (struct pci_version_request *)&ctxt.pkt.message; version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; for(i=0; i< num_version; i++) { version_req->protocol_version = version[i]; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, version_req, sizeof(*version_req), (uint64_t)(uintptr_t)&ctxt.pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); if (ret) { device_printf(hbus->pcib, "vmbus_pcib failed to request version: %d\n", ret); goto out; } if (comp_pkt.completion_status >= 0) { hbus->protocol_version = version[i]; device_printf(hbus->pcib, "PCI VMBus using version 
0x%x\n", hbus->protocol_version); ret = 0; goto out; } if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { device_printf(hbus->pcib, "vmbus_pcib version negotiation failed: %x\n", comp_pkt.completion_status); ret = EPROTO; goto out; } reinit_completion(&comp_pkt.host_event); } device_printf(hbus->pcib, "PCI pass-trhpugh VSP failed to find supported version\n"); out: free_completion(&comp_pkt.host_event); return (ret); } /* Ask the host to send along the list of child devices */ static int hv_pci_query_relations(struct hv_pcibus *hbus) { struct pci_message message; int ret; message.type = PCI_QUERY_BUS_RELATIONS; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &message, sizeof(message), 0); return (ret); } static int hv_pci_enter_d0(struct hv_pcibus *hbus) { struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_bus_d0_entry)]; } ctxt; int ret; /* * Tell the host that the bus is ready to use, and moved into the * powered-on state. This includes telling the host which region * of memory-mapped I/O space has been chosen for configuration space * access. */ init_completion(&comp_pkt.host_event); ctxt.pkt.completion_func = hv_pci_generic_compl; ctxt.pkt.compl_ctxt = &comp_pkt; d0_entry = (struct pci_bus_d0_entry *)&ctxt.pkt.message; memset(d0_entry, 0, sizeof(*d0_entry)); d0_entry->message_type.type = PCI_BUS_D0ENTRY; d0_entry->mmio_base = rman_get_start(hbus->cfg_res); ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, d0_entry, sizeof(*d0_entry), (uint64_t)(uintptr_t)&ctxt.pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); if (ret) goto out; if (comp_pkt.completion_status < 0) { device_printf(hbus->pcib, "vmbus_pcib failed to enable D0\n"); ret = EPROTO; } else { ret = 0; } out: free_completion(&comp_pkt.host_event); return (ret); } /* * It looks this is only needed by Windows VM, but let's send the message too * just to make the host happy. */ static int hv_send_resources_allocated(struct hv_pcibus *hbus) { struct pci_resources_assigned *res_assigned; struct pci_resources_assigned2 *res_assigned2; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev; struct pci_packet *pkt; uint32_t wslot; int ret = 0; size_t size_res; size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4) ? 
sizeof(*res_assigned) : sizeof(*res_assigned2); pkt = malloc(sizeof(*pkt) + size_res, M_DEVBUF, M_WAITOK | M_ZERO); for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; init_completion(&comp_pkt.host_event); memset(pkt, 0, sizeof(*pkt) + size_res); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4) { res_assigned = (struct pci_resources_assigned *)&pkt->message; res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; res_assigned->wslot.val = hpdev->desc.wslot.val; } else { res_assigned2 = (struct pci_resources_assigned2 *)&pkt->message; res_assigned2->message_type.type = PCI_RESOURCES_ASSIGNED2; res_assigned2->wslot.val = hpdev->desc.wslot.val; } ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, &pkt->message, size_res, (uint64_t)(uintptr_t)pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); free_completion(&comp_pkt.host_event); if (ret) break; if (comp_pkt.completion_status < 0) { ret = EPROTO; device_printf(hbus->pcib, "failed to send PCI_RESOURCES_ASSIGNED\n"); break; } } free(pkt, M_DEVBUF); return (ret); } static int hv_send_resources_released(struct hv_pcibus *hbus) { struct pci_child_message pkt; struct hv_pci_dev *hpdev; uint32_t wslot; int ret; for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; pkt.message_type.type = PCI_RESOURCES_RELEASED; pkt.wslot.val = hpdev->desc.wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &pkt, sizeof(pkt), 0); if (ret) return (ret); } return (0); } #define hv_cfg_read(x, s) \ static inline uint##x##_t hv_cfg_read_##s(struct hv_pcibus *bus, \ bus_size_t offset) \ { \ return (bus_read_##s(bus->cfg_res, offset)); \ } #define hv_cfg_write(x, s) \ static inline void hv_cfg_write_##s(struct hv_pcibus *bus, \ bus_size_t offset, uint##x##_t val) \ { \ return (bus_write_##s(bus->cfg_res, offset, val)); \ } hv_cfg_read(8, 1) hv_cfg_read(16, 2) hv_cfg_read(32, 4) hv_cfg_write(8, 1) hv_cfg_write(16, 2) hv_cfg_write(32, 4) static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, int size, uint32_t *val) { struct hv_pcibus *hbus = hpdev->hbus; bus_size_t addr = CFG_PAGE_OFFSET + where; /* * If the attempt is to read the IDs or the ROM BAR, simulate that. */ if (where + size <= PCIR_COMMAND) { memcpy(val, ((uint8_t *)&hpdev->desc.v_id) + where, size); } else if (where >= PCIR_REVID && where + size <= PCIR_CACHELNSZ) { memcpy(val, ((uint8_t *)&hpdev->desc.rev) + where - PCIR_REVID, size); } else if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_BIOS) { memcpy(val, (uint8_t *)&hpdev->desc.subsystem_id + where - PCIR_SUBVEND_0, size); } else if (where >= PCIR_BIOS && where + size <= PCIR_CAP_PTR) { /* ROM BARs are unimplemented */ *val = 0; } else if ((where >= PCIR_INTLINE && where + size <= PCIR_INTPIN) ||(where == PCIR_INTPIN && size == 1)) { /* * Interrupt Line and Interrupt PIN are hard-wired to zero * because this front-end only supports message-signaled * interrupts. */ *val = 0; } else if (where + size <= CFG_PAGE_SIZE) { mtx_lock(&hbus->config_lock); /* Choose the function to be read. */ hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val); /* Make sure the function was chosen before we start reading.*/ mb(); /* Read from that function's config space. 
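 * The wslot.val written to offset 0 above selects the function; the page
 * at CFG_PAGE_OFFSET then aliases that function's config space, which is
 * why the barriers must keep the select and the access ordered.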
*/ switch (size) { case 1: *((uint8_t *)val) = hv_cfg_read_1(hbus, addr); break; case 2: *((uint16_t *)val) = hv_cfg_read_2(hbus, addr); break; default: *((uint32_t *)val) = hv_cfg_read_4(hbus, addr); break; } /* * Make sure the read was done before we release the lock, * allowing consecutive reads/writes. */ mb(); mtx_unlock(&hbus->config_lock); } else { /* Invalid config read: it's unlikely to reach here. */ memset(val, 0, size); } } static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, int size, uint32_t val) { struct hv_pcibus *hbus = hpdev->hbus; bus_size_t addr = CFG_PAGE_OFFSET + where; /* SSIDs and ROM BARs are read-only */ if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_CAP_PTR) return; if (where >= PCIR_COMMAND && where + size <= CFG_PAGE_SIZE) { mtx_lock(&hbus->config_lock); /* Choose the function to be written. */ hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val); /* Make sure the function was chosen before we start writing. */ wmb(); /* Write to that function's config space. */ switch (size) { case 1: hv_cfg_write_1(hbus, addr, (uint8_t)val); break; case 2: hv_cfg_write_2(hbus, addr, (uint16_t)val); break; default: hv_cfg_write_4(hbus, addr, (uint32_t)val); break; } /* * Make sure the write was done before we release the lock, * allowing consecutive reads/writes. */ mb(); mtx_unlock(&hbus->config_lock); } else { /* Invalid config write: it's unlikely to reach here. */ return; } } /* * The vPCI in some Hyper-V releases does not initialize the low 4 * bits of BAR registers. This can result in weird problems that cause * the PCI code to fail to configure the BARs correctly. * * Just write all 1's to those BARs whose probed values are not zero. * This seems to make the Hyper-V vPCI and pci_write_bar() cooperate * correctly. */ static void vmbus_pcib_prepopulate_bars(struct hv_pcibus *hbus) { struct hv_pci_dev *hpdev; int i; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { for (i = 0; i < 6; i++) { /* Ignore empty bar */ if (hpdev->probed_bar[i] == 0) continue; uint32_t bar_val = 0; _hv_pcifront_read_config(hpdev, PCIR_BAR(i), 4, &bar_val); if (hpdev->probed_bar[i] != bar_val) { if (bootverbose) printf("vmbus_pcib: initialize bar %d " "by writing all 1s\n", i); _hv_pcifront_write_config(hpdev, PCIR_BAR(i), 4, 0xffffffff); /* Now write the original value back */ _hv_pcifront_write_config(hpdev, PCIR_BAR(i), 4, bar_val); } } } mtx_unlock(&hbus->device_list_lock); } static void vmbus_pcib_set_detaching(void *arg, int pending __unused) { struct hv_pcibus *hbus = arg; atomic_set_int(&hbus->detaching, 1); } static void vmbus_pcib_pre_detach(struct hv_pcibus *hbus) { struct task task; TASK_INIT(&task, 0, vmbus_pcib_set_detaching, hbus); /* * Make sure the channel callback won't push any possible new * PCI_BUS_RELATIONS and PCI_EJECT tasks to sc->taskq. */ vmbus_chan_run_task(hbus->sc->chan, &task); taskqueue_drain_all(hbus->sc->taskq); } /* * Standard probe entry point. * */ static int vmbus_pcib_probe(device_t dev) { if (VMBUS_PROBE_GUID(device_get_parent(dev), dev, &g_pass_through_dev_type) == 0) { device_set_desc(dev, "Hyper-V PCI Express Pass Through"); return (BUS_PROBE_DEFAULT); } return (ENXIO); }
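/*
 * (For reference, the all-ones write in vmbus_pcib_prepopulate_bars() above
 * is half of the standard BAR sizing handshake.  A sketch of the size
 * computation on a probed 32-bit memory BAR value; illustrative only, this
 * driver rewrites the registers but never computes sizes itself.)
 */
#if 0	/* illustrative only */
static uint32_t
sketch_bar_size(uint32_t probed)
{
	probed &= 0xfffffff0;	/* drop the low type bits */
	return (~probed + 1);	/* e.g. 0xfffff000 -> 0x1000 (4KB) */
}
#endif
/* * Standard attach entry point.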
/* * Standard attach entry point. * */ static int vmbus_pcib_attach(device_t dev) { const int pci_ring_size = (4 * PAGE_SIZE); const struct hyperv_guid *inst_guid; struct vmbus_channel *channel; struct vmbus_pcib_softc *sc; struct hv_pcibus *hbus; int rid = 0; int ret; hbus = malloc(sizeof(*hbus), M_DEVBUF, M_WAITOK | M_ZERO); hbus->pcib = dev; channel = vmbus_get_channel(dev); inst_guid = vmbus_chan_guid_inst(channel); hbus->pci_domain = inst_guid->hv_guid[9] | (inst_guid->hv_guid[8] << 8); mtx_init(&hbus->config_lock, "hbcfg", NULL, MTX_DEF); mtx_init(&hbus->device_list_lock, "hbdl", NULL, MTX_DEF); TAILQ_INIT(&hbus->children); TAILQ_INIT(&hbus->dr_list); hbus->cfg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, RM_MAX_END, PCI_CONFIG_MMIO_LENGTH, RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE)); if (!hbus->cfg_res) { device_printf(dev, "failed to get resource for cfg window\n"); ret = ENXIO; goto free_bus; } sc = device_get_softc(dev); sc->chan = channel; sc->rx_buf = malloc(PCIB_PACKET_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); sc->hbus = hbus; /* * The taskq is used to handle PCI_BUS_RELATIONS and PCI_EJECT * messages. NB: we can't handle the messages in the channel callback * directly, because the message handlers need to send new messages * to the host and wait for the host's completion messages, which * must also be handled by the channel callback. */ sc->taskq = taskqueue_create("vmbus_pcib_tq", M_WAITOK, taskqueue_thread_enqueue, &sc->taskq); taskqueue_start_threads(&sc->taskq, 1, PI_NET, "vmbus_pcib_tq"); hbus->sc = sc; init_completion(&hbus->query_completion); hbus->query_comp = &hbus->query_completion; ret = vmbus_chan_open(sc->chan, pci_ring_size, pci_ring_size, NULL, 0, vmbus_pcib_on_channel_callback, sc); if (ret) goto free_res; ret = hv_pci_protocol_negotiation(hbus, pci_protocol_versions, ARRAY_SIZE(pci_protocol_versions)); if (ret) goto vmbus_close; ret = hv_pci_query_relations(hbus); if (!ret) ret = wait_for_response(hbus, hbus->query_comp); if (ret) goto vmbus_close; ret = hv_pci_enter_d0(hbus); if (ret) goto vmbus_close; ret = hv_send_resources_allocated(hbus); if (ret) goto vmbus_close; vmbus_pcib_prepopulate_bars(hbus); hbus->pci_bus = device_add_child(dev, "pci", -1); if (!hbus->pci_bus) { device_printf(dev, "failed to create pci bus\n"); ret = ENXIO; goto vmbus_close; } bus_generic_attach(dev); hbus->state = hv_pcibus_installed; return (0); vmbus_close: vmbus_pcib_pre_detach(hbus); vmbus_chan_close(sc->chan); free_res: taskqueue_free(sc->taskq); free_completion(&hbus->query_completion); free(sc->rx_buf, M_DEVBUF); bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res); free_bus: mtx_destroy(&hbus->device_list_lock); mtx_destroy(&hbus->config_lock); free(hbus, M_DEVBUF); return (ret); } /* * Standard detach entry point */ static int vmbus_pcib_detach(device_t dev) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pcibus *hbus = sc->hbus; struct pci_message teardown_packet; struct pci_bus_relations relations; int ret; vmbus_pcib_pre_detach(hbus); if (hbus->state == hv_pcibus_installed) bus_generic_detach(dev); /* Delete any children which might still exist.
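 * Sending the all-zero pci_bus_relations message below (device count
 * zero) mimics the host reporting an empty bus, which lets
 * hv_pci_devices_present() tear down any remaining child state.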
*/ memset(&relations, 0, sizeof(relations)); hv_pci_devices_present(hbus, &relations); ret = hv_send_resources_released(hbus); if (ret) device_printf(dev, "failed to send PCI_RESOURCES_RELEASED\n"); teardown_packet.type = PCI_BUS_D0EXIT; ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &teardown_packet, sizeof(struct pci_message), 0); if (ret) device_printf(dev, "failed to send PCI_BUS_D0EXIT\n"); taskqueue_drain_all(hbus->sc->taskq); vmbus_chan_close(sc->chan); taskqueue_free(sc->taskq); free_completion(&hbus->query_completion); free(sc->rx_buf, M_DEVBUF); bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res); mtx_destroy(&hbus->device_list_lock); mtx_destroy(&hbus->config_lock); free(hbus, M_DEVBUF); return (0); } static int vmbus_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *val) { struct vmbus_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *val = sc->hbus->pci_domain; return (0); case PCIB_IVAR_BUS: /* There is only bus 0. */ *val = 0; return (0); } return (ENOENT); } static int vmbus_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t val) { return (ENOENT); } static struct resource * vmbus_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { unsigned int bar_no; struct hv_pci_dev *hpdev; struct vmbus_pcib_softc *sc = device_get_softc(dev); struct resource *res; unsigned int devfn; if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(sc->hbus->pci_domain, child, rid, start, end, count, flags)); /* Devices with port I/O BAR are not supported. */ if (type == SYS_RES_IOPORT) return (NULL); if (type == SYS_RES_MEMORY) { devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (NULL); bar_no = PCI_RID2BAR(*rid); if (bar_no >= MAX_NUM_BARS) return (NULL); /* Make sure a 32-bit BAR gets a 32-bit address */ if (!(hpdev->probed_bar[bar_no] & PCIM_BAR_MEM_64)) end = ulmin(end, 0xFFFFFFFF); } res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); /* * If this is a request for a specific range, assume it is * correct and pass it up to the parent. 
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); if (res == NULL) device_printf(dev, "vmbus_pcib_alloc_resource failed\n"); return (res); } static int vmbus_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_adjust_bus(sc->hbus->pci_domain, child, r, start, end)); return (bus_generic_adjust_resource(dev, child, r, start, end)); } static int vmbus_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_release_bus(sc->hbus->pci_domain, child, rid, r)); if (type == SYS_RES_IOPORT) return (EINVAL); return (bus_generic_release_resource(dev, child, type, rid, r)); } static int -vmbus_pcib_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +vmbus_pcib_activate_resource(device_t dev, device_t child, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); - if (type == PCI_RES_BUS) + if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_activate_bus(sc->hbus->pci_domain, child, - rid, r)); - return (bus_generic_activate_resource(dev, child, type, rid, r)); + r)); + return (bus_generic_activate_resource(dev, child, r)); } static int -vmbus_pcib_deactivate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +vmbus_pcib_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); - if (type == PCI_RES_BUS) + if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_deactivate_bus(sc->hbus->pci_domain, child, - rid, r)); - return (bus_generic_deactivate_resource(dev, child, type, rid, r)); + r)); + return (bus_generic_deactivate_resource(dev, child, r)); } static int vmbus_pcib_get_cpus(device_t pcib, device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { return (bus_get_cpus(pcib, op, setsize, cpuset)); } static uint32_t vmbus_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pci_dev *hpdev; unsigned int devfn = PCI_DEVFN(slot, func); uint32_t data = 0; KASSERT(bus == 0, ("bus should be 0, but is %u", bus)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (~0); _hv_pcifront_read_config(hpdev, reg, bytes, &data); return (data); } static void vmbus_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pci_dev *hpdev; unsigned int devfn = PCI_DEVFN(slot, func); KASSERT(bus == 0, ("bus should be 0, but is %u", bus)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return; _hv_pcifront_write_config(hpdev, reg, bytes, data); } static int vmbus_pcib_route_intr(device_t pcib, device_t dev, int pin) { /* We only support MSI/MSI-X and don't support INTx interrupt. 
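 * Returning PCI_INVALID_IRQ from the routing method tells the generic
 * PCI layer that no INTx routing exists on this bus, so child drivers
 * have to fall back to MSI or MSI-X.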
*/ return (PCI_INVALID_IRQ); } static int vmbus_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { #if defined(__amd64__) || defined(__i386__) return (PCIB_ALLOC_MSI(device_get_parent(pcib), dev, count, maxcount, irqs)); #endif #if defined(__aarch64__) return (intr_alloc_msi(pcib, dev, ACPI_MSI_XREF, count, maxcount, irqs)); #endif } static int vmbus_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { #if defined(__amd64__) || defined(__i386__) return (PCIB_RELEASE_MSI(device_get_parent(pcib), dev, count, irqs)); #endif #if defined(__aarch64__) return (intr_release_msi(pcib, dev, ACPI_MSI_XREF, count, irqs)); #endif } static int vmbus_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { #if defined(__aarch64__) int ret; #if defined(INTRNG) ret = intr_alloc_msix(pcib, dev, ACPI_MSI_XREF, irq); return (ret); #else return (ENXIO); #endif #else return (PCIB_ALLOC_MSIX(device_get_parent(pcib), dev, irq)); #endif /* __aarch64__ */ } static int vmbus_pcib_release_msix(device_t pcib, device_t dev, int irq) { #if defined(__aarch64__) return (intr_release_msix(pcib, dev, ACPI_MSI_XREF, irq)); #else return (PCIB_RELEASE_MSIX(device_get_parent(pcib), dev, irq)); #endif /* __aarch64__ */ } #if defined(__aarch64__) #define MSI_INTEL_ADDR_DEST 0x00000000 #define MSI_INTEL_DATA_DELFIXED 0x0 #endif #if defined(__amd64__) || defined(__i386__) #define MSI_INTEL_ADDR_DEST 0x000ff000 #define MSI_INTEL_DATA_INTVEC IOART_INTVEC /* Interrupt vector. */ #define MSI_INTEL_DATA_DELFIXED IOART_DELFIXED #endif static int vmbus_pcib_map_msi(device_t pcib, device_t child, int irq, uint64_t *addr, uint32_t *data) { unsigned int devfn; struct hv_pci_dev *hpdev; uint64_t v_addr; uint32_t v_data; struct hv_irq_desc *hid, *tmp_hid; unsigned int cpu, vcpu_id; unsigned int vector; struct vmbus_pcib_softc *sc = device_get_softc(pcib); struct compose_comp_ctxt comp; struct { struct pci_packet pkt; union { struct pci_create_interrupt v1; struct pci_create_interrupt3 v3; } int_pkts; } ctxt; int ret; uint32_t size; devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (ENOENT); #if defined(__aarch64__) ret = intr_map_msi(pcib, child, ACPI_MSI_XREF, irq, &v_addr, &v_data); #else ret = PCIB_MAP_MSI(device_get_parent(pcib), child, irq, &v_addr, &v_data); #endif if (ret) return (ret); TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) { if (hid->irq == irq) { TAILQ_REMOVE(&hpdev->irq_desc_list, hid, link); hv_int_desc_free(hpdev, hid); break; } } #if defined(__aarch64__) cpu = 0; vcpu_id = VMBUS_GET_VCPU_ID(device_get_parent(pcib), pcib, cpu); vector = v_data; #else cpu = apic_cpuid((v_addr & MSI_INTEL_ADDR_DEST) >> 12); vcpu_id = VMBUS_GET_VCPU_ID(device_get_parent(pcib), pcib, cpu); vector = v_data & MSI_INTEL_DATA_INTVEC; #endif if (hpdev->hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4 && vcpu_id > 63) { /* We only support vcpu_id < 64 before vPCI version 1.4 */ device_printf(pcib, "Error: " "vcpu_id %u overflowed on PCI VMBus version 0x%x\n", vcpu_id, hpdev->hbus->protocol_version); return (ENODEV); } init_completion(&comp.comp_pkt.host_event); memset(&ctxt, 0, sizeof(ctxt)); ctxt.pkt.completion_func = hv_pci_compose_compl; ctxt.pkt.compl_ctxt = &comp; switch (hpdev->hbus->protocol_version) { case PCI_PROTOCOL_VERSION_1_1: ctxt.int_pkts.v1.message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; ctxt.int_pkts.v1.wslot.val = hpdev->desc.wslot.val; ctxt.int_pkts.v1.int_desc.vector = vector;
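	/*
	 * Note: the v1 interrupt descriptor below carries the target CPU
	 * as a 64-bit cpu_mask, which is why vcpu_id must stay below 64
	 * on pre-1.4 protocols; the v3 descriptor in the next case uses
	 * an explicit processor_array and has no such limit.
	 */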
ctxt.int_pkts.v1.int_desc.vector_count = 1; ctxt.int_pkts.v1.int_desc.delivery_mode = MSI_INTEL_DATA_DELFIXED; ctxt.int_pkts.v1.int_desc.cpu_mask = 1ULL << vcpu_id; size = sizeof(ctxt.int_pkts.v1); break; case PCI_PROTOCOL_VERSION_1_4: ctxt.int_pkts.v3.message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3; ctxt.int_pkts.v3.wslot.val = hpdev->desc.wslot.val; ctxt.int_pkts.v3.int_desc.vector = vector; ctxt.int_pkts.v3.int_desc.vector_count = 1; ctxt.int_pkts.v3.int_desc.reserved = 0; ctxt.int_pkts.v3.int_desc.delivery_mode = MSI_INTEL_DATA_DELFIXED; ctxt.int_pkts.v3.int_desc.processor_count = 1; ctxt.int_pkts.v3.int_desc.processor_array[0] = vcpu_id; size = sizeof(ctxt.int_pkts.v3); break; } ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, &ctxt.int_pkts, size, (uint64_t)(uintptr_t)&ctxt.pkt); if (ret) { free_completion(&comp.comp_pkt.host_event); return (ret); } wait_for_completion(&comp.comp_pkt.host_event); free_completion(&comp.comp_pkt.host_event); if (comp.comp_pkt.completion_status < 0) { device_printf(pcib, "vmbus_pcib_map_msi completion_status %d\n", comp.comp_pkt.completion_status); return (EPROTO); } *addr = comp.int_desc.address; *data = comp.int_desc.data; hid = malloc(sizeof(struct hv_irq_desc), M_DEVBUF, M_WAITOK | M_ZERO); hid->irq = irq; hid->desc = comp.int_desc; TAILQ_INSERT_TAIL(&hpdev->irq_desc_list, hid, link); return (0); } static device_method_t vmbus_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vmbus_pcib_probe), DEVMETHOD(device_attach, vmbus_pcib_attach), DEVMETHOD(device_detach, vmbus_pcib_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, vmbus_pcib_read_ivar), DEVMETHOD(bus_write_ivar, vmbus_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, vmbus_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, vmbus_pcib_adjust_resource), DEVMETHOD(bus_release_resource, vmbus_pcib_release_resource), DEVMETHOD(bus_activate_resource, vmbus_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, vmbus_pcib_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, vmbus_pcib_get_cpus), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, vmbus_pcib_read_config), DEVMETHOD(pcib_write_config, vmbus_pcib_write_config), DEVMETHOD(pcib_route_interrupt, vmbus_pcib_route_intr), DEVMETHOD(pcib_alloc_msi, vmbus_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, vmbus_pcib_release_msi), DEVMETHOD(pcib_alloc_msix, vmbus_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, vmbus_pcib_release_msix), DEVMETHOD(pcib_map_msi, vmbus_pcib_map_msi), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, vmbus_pcib_driver, vmbus_pcib_methods, sizeof(struct vmbus_pcib_softc)); DRIVER_MODULE(vmbus_pcib, vmbus, vmbus_pcib_driver, 0, 0); MODULE_DEPEND(vmbus_pcib, vmbus, 1, 1, 1); MODULE_DEPEND(vmbus_pcib, pci, 1, 1, 1); #endif /* NEW_PCIB */ diff --git a/sys/dev/ofw/ofw_pcib.c b/sys/dev/ofw/ofw_pcib.c index 4f373030dc33..f9f17b21cdc2 100644 --- a/sys/dev/ofw/ofw_pcib.c +++ b/sys/dev/ofw/ofw_pcib.c @@ -1,759 +1,750 @@ /*- * Copyright (c) 2011 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" /* * If it is necessary to set another value of this for * some platforms it should be set at fdt.h file */ #ifndef PCI_MAP_INTR #define PCI_MAP_INTR 4 #endif #define PCI_INTR_PINS 4 /* * bus interface. */ static struct rman *ofw_pcib_get_rman(device_t, int, u_int); static struct resource * ofw_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int ofw_pcib_release_resource(device_t, device_t, int, int, struct resource *); -static int ofw_pcib_activate_resource(device_t, device_t, int, int, - struct resource *); -static int ofw_pcib_deactivate_resource(device_t, device_t, int, int, - struct resource *); +static int ofw_pcib_activate_resource(device_t, device_t, struct resource *); +static int ofw_pcib_deactivate_resource(device_t, device_t, struct resource *); static int ofw_pcib_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int ofw_pcib_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int ofw_pcib_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static int ofw_pcib_translate_resource(device_t bus, int type, rman_res_t start, rman_res_t *newstart); #ifdef __powerpc__ static bus_space_tag_t ofw_pcib_bus_get_bus_tag(device_t, device_t); #endif /* * pcib interface */ static int ofw_pcib_maxslots(device_t); /* * ofw_bus interface */ static phandle_t ofw_pcib_get_node(device_t, device_t); /* * local methods */ static int ofw_pcib_fill_ranges(phandle_t, struct ofw_pci_range *); /* * Driver methods. 
*/ static device_method_t ofw_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_attach, ofw_pcib_attach), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, ofw_pcib_read_ivar), DEVMETHOD(bus_write_ivar, ofw_pcib_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, ofw_pcib_get_rman), DEVMETHOD(bus_alloc_resource, ofw_pcib_alloc_resource), DEVMETHOD(bus_release_resource, ofw_pcib_release_resource), DEVMETHOD(bus_activate_resource, ofw_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, ofw_pcib_deactivate_resource), DEVMETHOD(bus_adjust_resource, ofw_pcib_adjust_resource), DEVMETHOD(bus_map_resource, ofw_pcib_map_resource), DEVMETHOD(bus_unmap_resource, ofw_pcib_unmap_resource), DEVMETHOD(bus_translate_resource, ofw_pcib_translate_resource), #ifdef __powerpc__ DEVMETHOD(bus_get_bus_tag, ofw_pcib_bus_get_bus_tag), #endif /* pcib interface */ DEVMETHOD(pcib_maxslots, ofw_pcib_maxslots), DEVMETHOD(pcib_route_interrupt, ofw_pcib_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, ofw_pcib_get_node), DEVMETHOD_END }; DEFINE_CLASS_0(ofw_pcib, ofw_pcib_driver, ofw_pcib_methods, 0); int ofw_pcib_init(device_t dev) { struct ofw_pci_softc *sc; phandle_t node; u_int32_t busrange[2]; struct ofw_pci_range *rp; int i, error; struct ofw_pci_cell_info *cell_info; node = ofw_bus_get_node(dev); sc = device_get_softc(dev); sc->sc_initialized = 1; sc->sc_range = NULL; sc->sc_pci_domain = device_get_unit(dev); cell_info = (struct ofw_pci_cell_info *)malloc(sizeof(*cell_info), M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_cell_info = cell_info; if (OF_getencprop(node, "bus-range", busrange, sizeof(busrange)) != 8) busrange[0] = 0; sc->sc_dev = dev; sc->sc_node = node; sc->sc_bus = busrange[0]; if (sc->sc_quirks & OFW_PCI_QUIRK_RANGES_ON_CHILDREN) { phandle_t c; int n, i; sc->sc_nrange = 0; for (c = OF_child(node); c != 0; c = OF_peer(c)) { n = ofw_pcib_nranges(c, cell_info); if (n > 0) sc->sc_nrange += n; } if (sc->sc_nrange == 0) { error = ENXIO; goto out; } sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), M_DEVBUF, M_WAITOK); i = 0; for (c = OF_child(node); c != 0; c = OF_peer(c)) { n = ofw_pcib_fill_ranges(c, &sc->sc_range[i]); if (n > 0) i += n; } KASSERT(i == sc->sc_nrange, ("range count mismatch")); } else { sc->sc_nrange = ofw_pcib_nranges(node, cell_info); if (sc->sc_nrange <= 0) { device_printf(dev, "could not get ranges\n"); error = ENXIO; goto out; } sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), M_DEVBUF, M_WAITOK); ofw_pcib_fill_ranges(node, sc->sc_range); } sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "PCI I/O Ports"; error = rman_init(&sc->sc_io_rman); if (error != 0) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto out; } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "PCI Non Prefetchable Memory"; error = rman_init(&sc->sc_mem_rman); if (error != 0) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto out_mem_rman; } sc->sc_pmem_rman.rm_type = RMAN_ARRAY; sc->sc_pmem_rman.rm_descr = "PCI Prefetchable Memory"; error = rman_init(&sc->sc_pmem_rman); if (error != 0) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto out_pmem_rman; } for (i = 0; i < sc->sc_nrange; i++) { error = 0; rp = sc->sc_range + i; if (sc->sc_range_mask & ((uint64_t)1 << i)) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_CONFIG: break; case OFW_PCI_PHYS_HI_SPACE_IO: error = rman_manage_region(&sc->sc_io_rman, rp->pci, rp->pci + rp->size - 1); break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: if (rp->pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) { sc->sc_have_pmem = 1; error = rman_manage_region(&sc->sc_pmem_rman, rp->pci, rp->pci + rp->size - 1); } else { error = rman_manage_region(&sc->sc_mem_rman, rp->pci, rp->pci + rp->size - 1); } break; } if (error != 0) { device_printf(dev, "rman_manage_region(%x, %#jx, %#jx) failed. " "error = %d\n", rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK, rp->pci, rp->pci + rp->size - 1, error); goto out_full; } } ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(cell_t)); return (0); out_full: rman_fini(&sc->sc_pmem_rman); out_pmem_rman: rman_fini(&sc->sc_mem_rman); out_mem_rman: rman_fini(&sc->sc_io_rman); out: free(sc->sc_cell_info, M_DEVBUF); free(sc->sc_range, M_DEVBUF); return (error); } void ofw_pcib_fini(device_t dev) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); free(sc->sc_cell_info, M_DEVBUF); free(sc->sc_range, M_DEVBUF); rman_fini(&sc->sc_io_rman); rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_pmem_rman); } int ofw_pcib_attach(device_t dev) { struct ofw_pci_softc *sc; int error; sc = device_get_softc(dev); if (!sc->sc_initialized) { error = ofw_pcib_init(dev); if (error != 0) return (error); } device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ofw_pcib_maxslots(device_t dev) { return (PCI_SLOTMAX); } int ofw_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct ofw_pci_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[PCI_MAP_INTR]; int intrcells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(&reg, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, &reg, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (intrcells != 0) { pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); return (pintr); } /* * Maybe it's a real interrupt, not an intpin */ if (pin > PCI_INTR_PINS) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } int ofw_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->sc_pci_domain; return (0); case PCIB_IVAR_BUS: *result = sc->sc_bus; return (0); default: break; } return (ENOENT); } int ofw_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_bus = value; return (0); default: break; } return (ENOENT); } int ofw_pcib_nranges(phandle_t node, struct ofw_pci_cell_info *info) { ssize_t nbase_ranges; if (info == NULL) return (-1); info->host_address_cells = 1; info->size_cells = 2; info->pci_address_cell = 3;
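	/*
	 * Worked example (illustrative numbers): with the typical cell
	 * counts read below -- pci_address_cell = 3, host_address_cells = 1,
	 * size_cells = 2 -- each "ranges" entry is 3 + 1 + 2 = 6 cells:
	 * phys.hi (space/flags), phys.mid/phys.lo (the 64-bit PCI address),
	 * one host address cell, and a 64-bit size in two cells.  A 24-cell
	 * "ranges" property therefore describes 4 ranges.
	 */
	OF_getencprop(OF_parent(node), "#address-cells",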
&(info->host_address_cells), sizeof(info->host_address_cells)); OF_getencprop(node, "#address-cells", &(info->pci_address_cell), sizeof(info->pci_address_cell)); OF_getencprop(node, "#size-cells", &(info->size_cells), sizeof(info->size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges <= 0) return (-1); return (nbase_ranges / sizeof(cell_t) / (info->pci_address_cell + info->host_address_cells + info->size_cells)); } static struct resource * ofw_pcib_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->sc_pci_domain, child, rid, start, end, count, flags)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); default: return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } } static int ofw_pcib_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_release_bus(sc->sc_pci_domain, child, rid, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_release_resource(bus, child, type, rid, res)); default: return (bus_generic_release_resource(bus, child, type, rid, res)); } } static int ofw_pcib_translate_resource(device_t bus, int type, rman_res_t start, rman_res_t *newstart) { struct ofw_pci_softc *sc; struct ofw_pci_range *rp; int space; sc = device_get_softc(bus); /* * Map this through the ranges list */ for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && rp->pci_hi != 0; rp++) { if (start < rp->pci || start >= rp->pci + rp->size) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_IO: space = SYS_RES_IOPORT; break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: space = SYS_RES_MEMORY; break; default: space = -1; } if (type == space) { start += (rp->host - rp->pci); *newstart = start; return (0); } } return (ENOENT); } static int -ofw_pcib_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +ofw_pcib_activate_resource(device_t bus, device_t child, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif - switch (type) { + switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_activate_bus(sc->sc_pci_domain, child, rid, - res)); + return (pci_domain_activate_bus(sc->sc_pci_domain, child, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_activate_resource(bus, child, type, rid, - res)); + return (bus_generic_rman_activate_resource(bus, child, res)); default: - return (bus_generic_activate_resource(bus, child, type, rid, - res)); + return (bus_generic_activate_resource(bus, child, res)); } } static int ofw_pcib_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct ofw_pci_softc *sc; struct ofw_pci_range *rp; rman_res_t 
length, start; int error, space; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); /* * Map this through the ranges list */ sc = device_get_softc(dev); for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && rp->pci_hi != 0; rp++) { if (start < rp->pci || start >= rp->pci + rp->size) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_IO: space = SYS_RES_IOPORT; break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: space = SYS_RES_MEMORY; break; default: space = -1; } if (rman_get_type(r) == space) { start += (rp->host - rp->pci); break; } } if (bootverbose) printf("ofw_pci mapdev: start %jx, len %jd\n", start, length); map->r_bustag = BUS_GET_BUS_TAG(child, child); if (map->r_bustag == NULL) return (ENOMEM); error = bus_space_map(map->r_bustag, start, length, 0, &map->r_bushandle); if (error != 0) return (error); /* XXX for powerpc only? */ map->r_vaddr = (void *)map->r_bushandle; map->r_size = length; return (0); } static int ofw_pcib_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: bus_space_unmap(map->r_bustag, map->r_bushandle, map->r_size); return (0); default: return (EINVAL); } } #ifdef __powerpc__ static bus_space_tag_t ofw_pcib_bus_get_bus_tag(device_t bus, device_t child) { return (&bs_le_tag); } #endif static int -ofw_pcib_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +ofw_pcib_deactivate_resource(device_t bus, device_t child, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif - switch (type) { + switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_deactivate_bus(sc->sc_pci_domain, child, rid, + return (pci_domain_deactivate_bus(sc->sc_pci_domain, child, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_deactivate_resource(bus, child, type, - rid, res)); + return (bus_generic_rman_deactivate_resource(bus, child, res)); default: - return (bus_generic_deactivate_resource(bus, child, type, rid, - res)); + return (bus_generic_deactivate_resource(bus, child, res)); } } static int ofw_pcib_adjust_resource(device_t bus, device_t child, struct resource *res, rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->sc_pci_domain, child, res, start, end)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_adjust_resource(bus, child, res, start, end)); default: return (bus_generic_adjust_resource(bus, child, res, start, end)); } } static phandle_t ofw_pcib_get_node(device_t bus, device_t dev) { struct ofw_pci_softc *sc; sc = device_get_softc(bus); /* We only have one child, the PCI bus, which needs our own node. 
*/ return (sc->sc_node); } static int ofw_pcib_fill_ranges(phandle_t node, struct ofw_pci_range *ranges) { int host_address_cells = 1, pci_address_cells = 3, size_cells = 2; cell_t *base_ranges; ssize_t nbase_ranges; int nranges; int i, j, k; OF_getencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); OF_getencprop(node, "#address-cells", &pci_address_cells, sizeof(pci_address_cells)); OF_getencprop(node, "#size-cells", &size_cells, sizeof(size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges <= 0) return (-1); nranges = nbase_ranges / sizeof(cell_t) / (pci_address_cells + host_address_cells + size_cells); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < nranges; i++) { ranges[i].pci_hi = base_ranges[j++]; ranges[i].pci = 0; for (k = 0; k < pci_address_cells - 1; k++) { ranges[i].pci <<= 32; ranges[i].pci |= base_ranges[j++]; } ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { ranges[i].host <<= 32; ranges[i].host |= base_ranges[j++]; } ranges[i].size = 0; for (k = 0; k < size_cells; k++) { ranges[i].size <<= 32; ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (nranges); } static struct rman * ofw_pcib_get_rman(device_t bus, int type, u_int flags) { struct ofw_pci_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_IOPORT: return (&sc->sc_io_rman); case SYS_RES_MEMORY: if (sc->sc_have_pmem && (flags & RF_PREFETCHABLE)) return (&sc->sc_pmem_rman); else return (&sc->sc_mem_rman); default: break; } return (NULL); } diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c index b2a94cc04bb0..302b9d75c864 100644 --- a/sys/dev/pccbb/pccbb.c +++ b/sys/dev/pccbb/pccbb.c @@ -1,1582 +1,1577 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000-2001 Jonathan Chen All rights reserved. * Copyright (c) 2002-2004 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /*- * Copyright (c) 1998, 1999 and 2000 * HAYAKAWA Koichi. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by HAYAKAWA Koichi. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Driver for PCI to CardBus Bridge chips * * References: * TI Datasheets: * http://www-s.ti.com/cgi-bin/sc/generic2.cgi?family=PCI+CARDBUS+CONTROLLERS * * Written by Jonathan Chen * The author would like to acknowledge: * * HAYAKAWA Koichi: Author of the NetBSD code for the same thing * * Warner Losh: Newbus/newcard guru and author of the pccard side of things * * YAMAMOTO Shigeru: Author of another FreeBSD cardbus driver * * David Cross: Author of the initial ugly hack for a specific cardbus card */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #include "pcib_if.h" #define DPRINTF(x) do { if (cbb_debug) printf x; } while (0) #define DEVPRINTF(x) do { if (cbb_debug) device_printf x; } while (0) #define PCI_MASK_CONFIG(DEV,REG,MASK,SIZE) \ pci_write_config(DEV, REG, pci_read_config(DEV, REG, SIZE) MASK, SIZE) #define PCI_MASK2_CONFIG(DEV,REG,MASK1,MASK2,SIZE) \ pci_write_config(DEV, REG, ( \ pci_read_config(DEV, REG, SIZE) MASK1) MASK2, SIZE) #define CBB_CARD_PRESENT(s) ((s & CBB_STATE_CD) == 0) #define CBB_START_MEM 0x88000000 #define CBB_START_32_IO 0x1000 #define CBB_START_16_IO 0x100 /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, cbb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CBB parameters"); /* There's no way to say TUNEABLE_LONG to get the right types */ u_long cbb_start_mem = CBB_START_MEM; SYSCTL_ULONG(_hw_cbb, OID_AUTO, start_memory, CTLFLAG_RWTUN, &cbb_start_mem, CBB_START_MEM, "Starting address for memory allocations"); u_long cbb_start_16_io = CBB_START_16_IO; SYSCTL_ULONG(_hw_cbb, OID_AUTO, start_16_io, CTLFLAG_RWTUN, &cbb_start_16_io, CBB_START_16_IO, "Starting ioport for 16-bit cards"); u_long cbb_start_32_io = CBB_START_32_IO; SYSCTL_ULONG(_hw_cbb, OID_AUTO, start_32_io, CTLFLAG_RWTUN, &cbb_start_32_io, CBB_START_32_IO, "Starting ioport for 32-bit cards"); int cbb_debug = 0; SYSCTL_INT(_hw_cbb, OID_AUTO, debug, CTLFLAG_RWTUN, &cbb_debug, 0, "Verbose cardbus bridge debugging"); static 
void cbb_insert(struct cbb_softc *sc); static void cbb_removal(struct cbb_softc *sc); static uint32_t cbb_detect_voltage(device_t brdev); static int cbb_cardbus_reset_power(device_t brdev, device_t child, int on); static int cbb_cardbus_io_open(device_t brdev, int win, uint32_t start, uint32_t end); static int cbb_cardbus_mem_open(device_t brdev, int win, uint32_t start, uint32_t end); static void cbb_cardbus_auto_open(struct cbb_softc *sc, int type); static int cbb_cardbus_activate_resource(device_t brdev, device_t child, - int type, int rid, struct resource *res); + struct resource *res); static int cbb_cardbus_deactivate_resource(device_t brdev, - device_t child, int type, int rid, struct resource *res); + device_t child, struct resource *res); static struct resource *cbb_cardbus_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int cbb_cardbus_release_resource(device_t brdev, device_t child, int type, int rid, struct resource *res); static int cbb_cardbus_power_enable_socket(device_t brdev, device_t child); static int cbb_cardbus_power_disable_socket(device_t brdev, device_t child); static int cbb_func_filt(void *arg); static void cbb_func_intr(void *arg); static void cbb_remove_res(struct cbb_softc *sc, struct resource *res) { struct cbb_reslist *rle; SLIST_FOREACH(rle, &sc->rl, link) { if (rle->res == res) { SLIST_REMOVE(&sc->rl, rle, cbb_reslist, link); free(rle, M_DEVBUF); return; } } } static struct resource * cbb_find_res(struct cbb_softc *sc, int type, int rid) { struct cbb_reslist *rle; SLIST_FOREACH(rle, &sc->rl, link) if (SYS_RES_MEMORY == rle->type && rid == rle->rid) return (rle->res); return (NULL); } static void cbb_insert_res(struct cbb_softc *sc, struct resource *res, int type, int rid) { struct cbb_reslist *rle; /* * Need to record allocated resource so we can iterate through * it later. */ rle = malloc(sizeof(struct cbb_reslist), M_DEVBUF, M_NOWAIT); if (rle == NULL) panic("cbb_cardbus_alloc_resource: can't record entry!"); rle->res = res; rle->type = type; rle->rid = rid; SLIST_INSERT_HEAD(&sc->rl, rle, link); } static void cbb_destroy_res(struct cbb_softc *sc) { struct cbb_reslist *rle; while ((rle = SLIST_FIRST(&sc->rl)) != NULL) { device_printf(sc->dev, "Danger Will Robinson: Resource " "left allocated! This is a bug... " "(rid=%x, type=%d, addr=%jx)\n", rle->rid, rle->type, rman_get_start(rle->res)); SLIST_REMOVE_HEAD(&sc->rl, link); free(rle, M_DEVBUF); } } /* * Disable function interrupts by telling the bridge to generate IRQ1 * interrupts. These interrupts aren't really generated by the chip, since * IRQ1 is reserved. Some chipsets assert INTA# inappropriately during * initialization, so this helps to work around the problem. * * XXX We can't do this workaround for all chipsets, because this * XXX causes interference with the keyboard because some chipsets will * XXX actually signal IRQ1 over their serial interrupt connections to * XXX the south bridge. Disable it for now. */ void cbb_disable_func_intr(struct cbb_softc *sc) { #if 0 uint8_t reg; reg = (exca_getb(&sc->exca, EXCA_INTR) & ~EXCA_INTR_IRQ_MASK) | EXCA_INTR_IRQ_RESERVED1; exca_putb(&sc->exca, EXCA_INTR, reg); #endif }
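/*
 * For illustration (not a code change): the PCI_MASK_CONFIG macro defined
 * earlier in this file is a textual read-modify-write of a config
 * register, so a call such as
 *	PCI_MASK_CONFIG(sc->dev, CBBR_BRIDGECTRL,
 *	    & ~CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN, 2);
 * expands to
 *	pci_write_config(sc->dev, CBBR_BRIDGECTRL,
 *	    pci_read_config(sc->dev, CBBR_BRIDGECTRL, 2) &
 *	    ~CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN, 2);
 */

/*
 * Enable function interrupts.  We turn on function interrupts when the card
 * requests an interrupt.  The PCMCIA standard says that we should set
 * the lower 4 bits to 0 to route via PCI.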
Note: we call this for both * CardBus and R2 (PC Card) cases, but it should have no effect on CardBus * cards. */ static void cbb_enable_func_intr(struct cbb_softc *sc) { uint8_t reg; reg = (exca_getb(&sc->exca, EXCA_INTR) & ~EXCA_INTR_IRQ_MASK) | EXCA_INTR_IRQ_NONE; PCI_MASK_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN, 2); exca_putb(&sc->exca, EXCA_INTR, reg); } int cbb_detach(device_t brdev) { struct cbb_softc *sc = device_get_softc(brdev); device_t *devlist; int tmp, tries, error, numdevs; /* * Before we delete the children (which we have to do because * attach doesn't check for children busses correctly), we have * to detach the children. Even if we didn't need to delete the * children, we have to detach them. */ error = bus_generic_detach(brdev); if (error != 0) return (error); /* * Since the attach routine doesn't search for children before it * attaches them to this device, we must delete them here in order * for the kldload/unload case to work. If we failed to do that, then * we'd get duplicate devices when cbb.ko was reloaded. */ tries = 10; do { error = device_get_children(brdev, &devlist, &numdevs); if (error == 0) break; /* * Try hard to cope with low memory. */ if (error == ENOMEM) { pause("cbbnomem", 1); continue; } } while (tries-- > 0); for (tmp = 0; tmp < numdevs; tmp++) device_delete_child(brdev, devlist[tmp]); free(devlist, M_TEMP); /* Turn off the interrupts */ cbb_set(sc, CBB_SOCKET_MASK, 0); /* reset 16-bit pcmcia bus */ exca_clrb(&sc->exca, EXCA_INTR, EXCA_INTR_RESET); /* turn off power */ cbb_power(brdev, CARD_OFF); /* Ack the interrupt */ cbb_set(sc, CBB_SOCKET_EVENT, 0xffffffff); /* * Wait for the thread to die. kproc_exit will do a wakeup * on the event thread's struct proc * so that we know it is * safe to proceed. IF the thread is running, set the please * die flag and wait for it to comply. Since the wakeup on * the event thread happens only in kproc_exit, we don't * need to loop here. */ bus_teardown_intr(brdev, sc->irq_res, sc->intrhand); mtx_lock(&sc->mtx); sc->flags |= CBB_KTHREAD_DONE; while (sc->flags & CBB_KTHREAD_RUNNING) { DEVPRINTF((sc->dev, "Waiting for thread to die\n")); wakeup(&sc->intrhand); msleep(sc->event_thread, &sc->mtx, PWAIT, "cbbun", 0); } mtx_unlock(&sc->mtx); bus_release_resource(brdev, SYS_RES_IRQ, 0, sc->irq_res); bus_release_resource(brdev, SYS_RES_MEMORY, CBBR_SOCKBASE, sc->base_res); mtx_destroy(&sc->mtx); return (0); } int cbb_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { struct cbb_intrhand *ih; struct cbb_softc *sc = device_get_softc(dev); int err; if (filt == NULL && intr == NULL) return (EINVAL); ih = malloc(sizeof(struct cbb_intrhand), M_DEVBUF, M_NOWAIT); if (ih == NULL) return (ENOMEM); *cookiep = ih; ih->filt = filt; ih->intr = intr; ih->arg = arg; ih->sc = sc; /* * XXX need to turn on ISA interrupts, if we ever support them, but * XXX for now that's all we need to do. */ err = BUS_SETUP_INTR(device_get_parent(dev), child, irq, flags, filt ? cbb_func_filt : NULL, intr ? cbb_func_intr : NULL, ih, &ih->cookie); if (err != 0) { free(ih, M_DEVBUF); return (err); } cbb_enable_func_intr(sc); sc->cardok = 1; return 0; } int cbb_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct cbb_intrhand *ih; int err; /* XXX Need to do different things for ISA interrupts. 
*/ ih = (struct cbb_intrhand *) cookie; err = BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq, ih->cookie); if (err != 0) return (err); free(ih, M_DEVBUF); return (0); } void cbb_driver_added(device_t brdev, driver_t *driver) { struct cbb_softc *sc = device_get_softc(brdev); device_t *devlist; device_t dev; int tmp; int numdevs; int wake = 0; DEVICE_IDENTIFY(driver, brdev); tmp = device_get_children(brdev, &devlist, &numdevs); if (tmp != 0) { device_printf(brdev, "Cannot get children list, no reprobe\n"); return; } for (tmp = 0; tmp < numdevs; tmp++) { dev = devlist[tmp]; if (device_get_state(dev) == DS_NOTPRESENT && device_probe_and_attach(dev) == 0) wake++; } free(devlist, M_TEMP); if (wake > 0) wakeup(&sc->intrhand); } void cbb_child_detached(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); /* I'm not sure we even need this */ if (child != sc->cbdev && child != sc->exca.pccarddev) device_printf(brdev, "Unknown child detached: %s\n", device_get_nameunit(child)); } /************************************************************************/ /* Kthreads */ /************************************************************************/ void cbb_event_thread(void *arg) { struct cbb_softc *sc = arg; uint32_t status; int err; int not_a_card = 0; /* * We need to act as a power sequencer on startup. Delay 2s/channel * to ensure the other channels have had a chance to come up. We likely * should add a lock that's shared on a per-slot basis so that only * one power event can happen per slot at a time. */ pause("cbbstart", hz * device_get_unit(sc->dev) * 2); mtx_lock(&sc->mtx); sc->flags |= CBB_KTHREAD_RUNNING; while ((sc->flags & CBB_KTHREAD_DONE) == 0) { mtx_unlock(&sc->mtx); status = cbb_get(sc, CBB_SOCKET_STATE); DPRINTF(("Status is 0x%x\n", status)); if (!CBB_CARD_PRESENT(status)) { not_a_card = 0; /* We know card type */ cbb_removal(sc); } else if (status & CBB_STATE_NOT_A_CARD) { /* * Up to 10 times, try to rescan the card when we see * NOT_A_CARD. 10 is somewhat arbitrary. When this * pathology hits, there's a ~40% chance each try will * fail. 10 tries takes about 5s and gives a * 99.99% certainty of the result. */ if (not_a_card++ < 10) { DEVPRINTF((sc->dev, "Not a card bit set, rescanning\n")); cbb_setb(sc, CBB_SOCKET_FORCE, CBB_FORCE_CV_TEST); } else { device_printf(sc->dev, "Can't determine card type\n"); } } else { not_a_card = 0; /* We know card type */ cbb_insert(sc); } /* * First time through we need to tell mountroot that we're * done. */ if (sc->sc_root_token) { root_mount_rel(sc->sc_root_token); sc->sc_root_token = NULL; } /* * Wait until it has been 250ms since the last time we * got an interrupt. We handle the rest of the interrupt * at the top of the loop. Although we clear the bit in the * ISR, we signal sc->cv from the detach path after we've * set the CBB_KTHREAD_DONE bit, so we can't do a simple * 250ms sleep here. * * In our ISR, we turn off the card changed interrupt. Turn * them back on here before we wait for them to happen. We * turn them on/off so that we can tolerate a large latency * between the time we signal cbb_event_thread and it gets * a chance to run.
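 * (That is what the loop below implements: msleep() with a hz/5 timeout
 * returns EWOULDBLOCK only once a full interval passes with no new
 * card-event wakeup, which is what ends the debounce.)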
*/ mtx_lock(&sc->mtx); cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD | CBB_SOCKET_MASK_CSTS); msleep(&sc->intrhand, &sc->mtx, 0, "-", 0); err = 0; while (err != EWOULDBLOCK && (sc->flags & CBB_KTHREAD_DONE) == 0) err = msleep(&sc->intrhand, &sc->mtx, 0, "-", hz / 5); } DEVPRINTF((sc->dev, "Thread terminating\n")); sc->flags &= ~CBB_KTHREAD_RUNNING; mtx_unlock(&sc->mtx); kproc_exit(0); } /************************************************************************/ /* Insert/removal */ /************************************************************************/ static void cbb_insert(struct cbb_softc *sc) { uint32_t sockevent, sockstate; sockevent = cbb_get(sc, CBB_SOCKET_EVENT); sockstate = cbb_get(sc, CBB_SOCKET_STATE); DEVPRINTF((sc->dev, "card inserted: event=0x%08x, state=%08x\n", sockevent, sockstate)); if (sockstate & CBB_STATE_R2_CARD) { if (device_is_attached(sc->exca.pccarddev)) { sc->flags |= CBB_16BIT_CARD; exca_insert(&sc->exca); } else { device_printf(sc->dev, "16-bit card inserted, but no pccard bus.\n"); } } else if (sockstate & CBB_STATE_CB_CARD) { if (device_is_attached(sc->cbdev)) { sc->flags &= ~CBB_16BIT_CARD; CARD_ATTACH_CARD(sc->cbdev); } else { device_printf(sc->dev, "CardBus card inserted, but no cardbus bus.\n"); } } else { /* * We should power the card down, and try again a couple of * times if this happens. XXX */ device_printf(sc->dev, "Unsupported card type detected\n"); } } static void cbb_removal(struct cbb_softc *sc) { sc->cardok = 0; if (sc->flags & CBB_16BIT_CARD) { exca_removal(&sc->exca); } else { if (device_is_attached(sc->cbdev)) CARD_DETACH_CARD(sc->cbdev); } cbb_destroy_res(sc); } /************************************************************************/ /* Interrupt Handler */ /************************************************************************/ static int cbb_func_filt(void *arg) { struct cbb_intrhand *ih = (struct cbb_intrhand *)arg; struct cbb_softc *sc = ih->sc; /* * Make sure that the card is really there. */ if (!sc->cardok) return (FILTER_STRAY); if (!CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) { sc->cardok = 0; return (FILTER_HANDLED); } return ((*ih->filt)(ih->arg)); } static void cbb_func_intr(void *arg) { struct cbb_intrhand *ih = (struct cbb_intrhand *)arg; struct cbb_softc *sc = ih->sc; /* * While this check may seem redundant, it helps close a race * condition. If the card is ejected after the filter runs, but * before this ISR can be scheduled, then we need to do the same * filtering to prevent the card's ISR from being called. One could * argue that the card's ISR should be able to cope, but experience * has shown they can't always. This mitigates the problem by making * the race quite a bit smaller. Properly written client ISRs should * cope with the card going away in the middle of the ISR. We assume * that drivers that are sophisticated enough to use filters don't * need our protection. This also allows us to ensure they *ARE* * called if their filter said they needed to be called. 
*/ if (ih->filt == NULL) { if (!sc->cardok) return; if (!CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) { sc->cardok = 0; return; } } ih->intr(ih->arg); } /************************************************************************/ /* Generic Power functions */ /************************************************************************/ static uint32_t cbb_detect_voltage(device_t brdev) { struct cbb_softc *sc = device_get_softc(brdev); uint32_t psr; uint32_t vol = CARD_UKN_CARD; psr = cbb_get(sc, CBB_SOCKET_STATE); if (psr & CBB_STATE_5VCARD && psr & CBB_STATE_5VSOCK) vol |= CARD_5V_CARD; if (psr & CBB_STATE_3VCARD && psr & CBB_STATE_3VSOCK) vol |= CARD_3V_CARD; if (psr & CBB_STATE_XVCARD && psr & CBB_STATE_XVSOCK) vol |= CARD_XV_CARD; if (psr & CBB_STATE_YVCARD && psr & CBB_STATE_YVSOCK) vol |= CARD_YV_CARD; return (vol); } static uint8_t cbb_o2micro_power_hack(struct cbb_softc *sc) { uint8_t reg; /* * Issue #2: INT# not qualified with IRQ Routing Bit. An * unexpected PCI INT# may be generated during PC Card * initialization even with the IRQ Routing Bit Set with some * PC Cards. * * This is a two part issue. The first part is that some of * our older controllers have an issue in which the slot's PCI * INT# is NOT qualified by the IRQ routing bit (PCI reg. 3Eh * bit 7). Regardless of the IRQ routing bit, if NO ISA IRQ * is selected (ExCA register 03h bits 3:0, of the slot, are * cleared) we will generate INT# if IREQ# is asserted. The * second part is because some PC Cards prematurely assert * IREQ# before the ExCA registers are fully programmed. This * in turn asserts INT# because ExCA register 03h bits 3:0 * (ISA IRQ Select) are not yet programmed. * * The fix for this issue, which will work for any controller * (old or new), is to set ExCA register 03h bits 3:0 = 0001b * (select IRQ1), of the slot, before turning on slot power. * Selecting IRQ1 will result in INT# NOT being asserted * (because IRQ1 is selected), and IRQ1 won't be asserted * because our controllers don't generate IRQ1. * * Other, non O2Micro controllers will generate irq 1 in some * situations, so we can't do this hack for everybody. Reports of * keyboard controller's interrupts being suppressed occurred when * we did this. */ reg = exca_getb(&sc->exca, EXCA_INTR); exca_putb(&sc->exca, EXCA_INTR, (reg & 0xf0) | 1); return (reg); } /* * Restore the damage that cbb_o2micro_power_hack does to EXCA_INTR so * we don't have an interrupt storm on power on. This has the effect of * disabling card status change interrupts for the duration of poweron. */
static void cbb_o2micro_power_hack2(struct cbb_softc *sc, uint8_t reg) { exca_putb(&sc->exca, EXCA_INTR, reg); } int cbb_power(device_t brdev, int volts) { uint32_t status, sock_ctrl, reg_ctrl, mask; struct cbb_softc *sc = device_get_softc(brdev); int cnt, sane; int retval = 0; int on = 0; uint8_t reg = 0; sock_ctrl = cbb_get(sc, CBB_SOCKET_CONTROL); sock_ctrl &= ~CBB_SOCKET_CTRL_VCCMASK; switch (volts & CARD_VCCMASK) { case 5: sock_ctrl |= CBB_SOCKET_CTRL_VCC_5V; on++; break; case 3: sock_ctrl |= CBB_SOCKET_CTRL_VCC_3V; on++; break; case XV: sock_ctrl |= CBB_SOCKET_CTRL_VCC_XV; on++; break; case YV: sock_ctrl |= CBB_SOCKET_CTRL_VCC_YV; on++; break; case 0: break; default: return (0); /* power NEVER changed */ } /* VPP == VCC */ sock_ctrl &= ~CBB_SOCKET_CTRL_VPPMASK; sock_ctrl |= ((sock_ctrl >> 4) & 0x07); if (cbb_get(sc, CBB_SOCKET_CONTROL) == sock_ctrl) return (1); /* no change necessary */ DEVPRINTF((sc->dev, "cbb_power: %dV\n", volts)); if (volts != 0 && sc->chipset == CB_O2MICRO) reg = cbb_o2micro_power_hack(sc); /* * We have to mask the card change detect interrupt while we're * messing with the power. It is allowed to bounce while we're * messing with power as things settle down. In addition, we mask off * the card's function interrupt by routing it via the ISA bus. This * bit generally only affects 16-bit cards. Some bridges allow one to * set another bit to have it also affect 32-bit cards. Since 32-bit * cards are required to be better behaved, we don't bother to get * into those bridge specific features. * * XXX I wonder if we need to enable the READY bit interrupt in the * EXCA CSC register for 16-bit cards, and disable the CD bit? */ mask = cbb_get(sc, CBB_SOCKET_MASK); mask |= CBB_SOCKET_MASK_POWER; mask &= ~CBB_SOCKET_MASK_CD; cbb_set(sc, CBB_SOCKET_MASK, mask); PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, |CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN, 2); cbb_set(sc, CBB_SOCKET_CONTROL, sock_ctrl); if (on) { mtx_lock(&sc->mtx); cnt = sc->powerintr; /* * We have a shortish timeout of 500ms here. Some bridges do * not generate a POWER_CYCLE event for 16-bit cards. In * those cases, we have to cope the best we can, and having * only a short delay is better than the alternatives. Others * raise the power cycle a smidge before it is really ready. * We deal with those below. */ sane = 10; while (!(cbb_get(sc, CBB_SOCKET_STATE) & CBB_STATE_POWER_CYCLE) && cnt == sc->powerintr && sane-- > 0) msleep(&sc->powerintr, &sc->mtx, 0, "-", hz / 20); mtx_unlock(&sc->mtx); /* * Relax for 100ms. Some bridges appear to assert this signal * right away, but before the card has stabilized. Other * cards need more time to come up reliably. * Experiments with troublesome setups show this to be a * "cheap" way to enhance reliability. We need not do this for * "off" since we don't touch the card after we turn it off. */ pause("cbbPwr", min(hz / 10, 1)); /* * The TOPIC95B requires a little bit of extra time to get its * act together, so delay for an additional 100ms. Also as * documented below, it doesn't seem to set the POWER_CYCLE * bit, so don't whine if it never came on. */ if (sc->chipset == CB_TOPIC95) pause("cbb95B", hz / 10); else if (sane <= 0) device_printf(sc->dev, "power timeout, doom?\n"); } /* * After the power is good, we can turn off the power interrupt. * However, the PC Card standard says that we must delay turning the * CD bit back on for a bit to allow for bouncyness on power down * (recall that we don't wait above for a power down, since we don't * get an interrupt for that).
 * We're called either from the suspend code, in which case we don't want
 * to turn card change on again, or from the card insertion code, in which
 * case the cbb thread will turn it on for us before it waits to be woken
 * by a change event.
 *
 * NB: Topic95B doesn't set the power cycle bit.  We assume that both
 * it and the TOPIC95 behave the same.
 */
	cbb_clrb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_POWER);
	status = cbb_get(sc, CBB_SOCKET_STATE);
	if (on && sc->chipset != CB_TOPIC95) {
		if ((status & CBB_STATE_POWER_CYCLE) == 0)
			device_printf(sc->dev, "Power not on?\n");
	}
	if (status & CBB_STATE_BAD_VCC_REQ) {
		device_printf(sc->dev, "Bad Vcc requested\n");
		/*
		 * Turn off the power, and try again.  Retrigger other
		 * active interrupts via the force register.  From NetBSD
		 * PR 36652, coded by me from the description there.
		 */
		sock_ctrl &= ~CBB_SOCKET_CTRL_VCCMASK;
		sock_ctrl &= ~CBB_SOCKET_CTRL_VPPMASK;
		cbb_set(sc, CBB_SOCKET_CONTROL, sock_ctrl);
		status &= ~CBB_STATE_BAD_VCC_REQ;
		status &= ~CBB_STATE_DATA_LOST;
		status |= CBB_FORCE_CV_TEST;
		cbb_set(sc, CBB_SOCKET_FORCE, status);
		goto done;
	}
	if (sc->chipset == CB_TOPIC97) {
		reg_ctrl = pci_read_config(sc->dev, TOPIC_REG_CTRL, 4);
		reg_ctrl &= ~TOPIC97_REG_CTRL_TESTMODE;
		if (on)
			reg_ctrl |= TOPIC97_REG_CTRL_CLKRUN_ENA;
		else
			reg_ctrl &= ~TOPIC97_REG_CTRL_CLKRUN_ENA;
		pci_write_config(sc->dev, TOPIC_REG_CTRL, reg_ctrl, 4);
	}
	retval = 1;
done:;
	if (volts != 0 && sc->chipset == CB_O2MICRO)
		cbb_o2micro_power_hack2(sc, reg);
	return (retval);
}

static int
cbb_current_voltage(device_t brdev)
{
	struct cbb_softc *sc = device_get_softc(brdev);
	uint32_t ctrl;

	ctrl = cbb_get(sc, CBB_SOCKET_CONTROL);
	switch (ctrl & CBB_SOCKET_CTRL_VCCMASK) {
	case CBB_SOCKET_CTRL_VCC_5V:
		return CARD_5V_CARD;
	case CBB_SOCKET_CTRL_VCC_3V:
		return CARD_3V_CARD;
	case CBB_SOCKET_CTRL_VCC_XV:
		return CARD_XV_CARD;
	case CBB_SOCKET_CTRL_VCC_YV:
		return CARD_YV_CARD;
	}
	return 0;
}

/*
 * Detect the voltage for the card, and set it.  Since the power
 * used is the square of the voltage, lower voltages are a big win
 * and what Windows does (and what Microsoft prefers).  The MS paper
 * also talks about preferring the CIS entry as well, but that has
 * to be done elsewhere.  We also optimize power sequencing here
 * and don't change things if we're already powered up at a supported
 * voltage.
 *
 * In addition, we power up with OE disabled.  We'll set it later
 * in the power up sequence.
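 *
 * (To put a number on the square law: a card run at 3.3V instead of 5V
 * draws roughly (3.3/5)^2, i.e. about 44%, of the power for the same
 * load.)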
 */
static int
cbb_do_power(device_t brdev)
{
	struct cbb_softc *sc = device_get_softc(brdev);
	uint32_t voltage, curpwr;
	uint32_t status;

	/* Don't enable OE (output enable) until power stable */
	exca_clrb(&sc->exca, EXCA_PWRCTL, EXCA_PWRCTL_OE);

	voltage = cbb_detect_voltage(brdev);
	curpwr = cbb_current_voltage(brdev);
	status = cbb_get(sc, CBB_SOCKET_STATE);
	if ((status & CBB_STATE_POWER_CYCLE) && (voltage & curpwr))
		return 0;
	/* Prefer the lowest voltage supported */
	cbb_power(brdev, CARD_OFF);
	if (voltage & CARD_YV_CARD)
		cbb_power(brdev, CARD_VCC(YV));
	else if (voltage & CARD_XV_CARD)
		cbb_power(brdev, CARD_VCC(XV));
	else if (voltage & CARD_3V_CARD)
		cbb_power(brdev, CARD_VCC(3));
	else if (voltage & CARD_5V_CARD)
		cbb_power(brdev, CARD_VCC(5));
	else {
		device_printf(brdev, "Unknown card voltage\n");
		return (ENXIO);
	}
	return (0);
}

/************************************************************************/
/* CardBus power functions						*/
/************************************************************************/

static int
cbb_cardbus_reset_power(device_t brdev, device_t child, int on)
{
	struct cbb_softc *sc = device_get_softc(brdev);
	uint32_t b, h;
	int delay, count, zero_seen, func;

	/*
	 * Asserting reset for 20ms is necessary for most bridges.  For some
	 * reason, the Ricoh RF5C47x bridges need it asserted for 400ms.  The
	 * root cause of this is unknown, and NetBSD does the same thing.
	 */
	delay = sc->chipset == CB_RF5C47X ? 400 : 20;
	PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, |CBBM_BRIDGECTRL_RESET, 2);
	pause("cbbP3", hz * delay / 1000);

	/*
	 * If a card exists and we're turning it on, take it out of reset.
	 * After clearing reset, wait up to 1.1s for the first configuration
	 * register (vendor/product) of device 0.0 to become != 0xffffffff.
	 * The PCMCIA PC Card Host System Specification says that when
	 * powering up the card, the PCI Spec v2.1 must be followed.  In PCI
	 * spec v2.2 Table 4-6, Trhfa (Reset High to first Config Access) is
	 * at most 2^25 clocks, or just over 1s.  Section 2.2.1 states any
	 * card not ready to participate in bus transactions must tristate
	 * its outputs.  Therefore, any access to its configuration registers
	 * must be ignored.  In that state, the config reg will read
	 * 0xffffffff.  Section 6.2.1 states a vendor id of 0xffff is
	 * invalid, so this can never match a real card.  Print a warning if
	 * it never returns a real id.  The PCMCIA PC Card Electrical Spec
	 * Section 5.2.7.1 implies only device 0 is present on a cardbus
	 * bus, so that's the only register we check here.
	 */
	if (on && CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) {
		PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL,
		    &~CBBM_BRIDGECTRL_RESET, 2);
		b = pcib_get_bus(child);
		count = 1100 / 20;
		do {
			pause("cbbP4", hz * 2 / 100);
		} while (PCIB_READ_CONFIG(brdev, b, 0, 0, PCIR_DEVVENDOR, 4) ==
		    0xfffffffful && --count >= 0);
		if (count < 0)
			device_printf(brdev, "Warning: Bus reset timeout\n");

		/*
		 * Some cards (so far just an atheros card I have) seem to
		 * come out of reset in a funky state.  They report they are
		 * multi-function cards, but have nonsense for some of the
		 * higher functions.  So if the card claims to be MFDEV, and
		 * any of the higher functions' ID is 0, then we've hit the
		 * bug and we'll try again.
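		 * The EINVAL returned below is the signal for
		 * cbb_cardbus_power_enable_socket() to power the socket
		 * back off and run the whole power/reset sequence again.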
*/ h = PCIB_READ_CONFIG(brdev, b, 0, 0, PCIR_HDRTYPE, 1); if ((h & PCIM_MFDEV) == 0) return 0; zero_seen = 0; for (func = 1; func < 8; func++) { h = PCIB_READ_CONFIG(brdev, b, 0, func, PCIR_DEVVENDOR, 4); if (h == 0) zero_seen++; } if (!zero_seen) return 0; return (EINVAL); } return 0; } static int cbb_cardbus_power_disable_socket(device_t brdev, device_t child) { cbb_power(brdev, CARD_OFF); cbb_cardbus_reset_power(brdev, child, 0); return (0); } static int cbb_cardbus_power_enable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); int err, count; if (!CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) return (ENODEV); count = 10; do { err = cbb_do_power(brdev); if (err) return (err); err = cbb_cardbus_reset_power(brdev, child, 1); if (err) { device_printf(brdev, "Reset failed, trying again.\n"); cbb_cardbus_power_disable_socket(brdev, child); pause("cbbErr1", hz / 10); /* wait 100ms */ } } while (err != 0 && count-- > 0); return (0); } /************************************************************************/ /* CardBus Resource */ /************************************************************************/ static void cbb_activate_window(device_t brdev, int type) { PCI_ENABLE_IO(device_get_parent(brdev), brdev, type); } static int cbb_cardbus_io_open(device_t brdev, int win, uint32_t start, uint32_t end) { int basereg; int limitreg; if ((win < 0) || (win > 1)) { DEVPRINTF((brdev, "cbb_cardbus_io_open: window out of range %d\n", win)); return (EINVAL); } basereg = win * 8 + CBBR_IOBASE0; limitreg = win * 8 + CBBR_IOLIMIT0; pci_write_config(brdev, basereg, start, 4); pci_write_config(brdev, limitreg, end, 4); cbb_activate_window(brdev, SYS_RES_IOPORT); return (0); } static int cbb_cardbus_mem_open(device_t brdev, int win, uint32_t start, uint32_t end) { int basereg; int limitreg; if ((win < 0) || (win > 1)) { DEVPRINTF((brdev, "cbb_cardbus_mem_open: window out of range %d\n", win)); return (EINVAL); } basereg = win * 8 + CBBR_MEMBASE0; limitreg = win * 8 + CBBR_MEMLIMIT0; pci_write_config(brdev, basereg, start, 4); pci_write_config(brdev, limitreg, end, 4); cbb_activate_window(brdev, SYS_RES_MEMORY); return (0); } #define START_NONE 0xffffffff #define END_NONE 0 static void cbb_cardbus_auto_open(struct cbb_softc *sc, int type) { uint32_t starts[2]; uint32_t ends[2]; struct cbb_reslist *rle; int align, i; uint32_t reg; starts[0] = starts[1] = START_NONE; ends[0] = ends[1] = END_NONE; if (type == SYS_RES_MEMORY) align = CBB_MEMALIGN; else if (type == SYS_RES_IOPORT) align = CBB_IOALIGN; else align = 1; SLIST_FOREACH(rle, &sc->rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if (!(rman_get_flags(rle->res) & RF_ACTIVE)) continue; if (rman_get_flags(rle->res) & RF_PREFETCHABLE) i = 1; else i = 0; if (rman_get_start(rle->res) < starts[i]) starts[i] = rman_get_start(rle->res); if (rman_get_end(rle->res) > ends[i]) ends[i] = rman_get_end(rle->res); } for (i = 0; i < 2; i++) { if (starts[i] == START_NONE) continue; starts[i] &= ~(align - 1); ends[i] = roundup2(ends[i], align) - 1; } if (starts[0] != START_NONE && starts[1] != START_NONE) { if (starts[0] < starts[1]) { if (ends[0] > starts[1]) { device_printf(sc->dev, "Overlapping ranges" " for prefetch and non-prefetch memory\n"); return; } } else { if (ends[1] > starts[0]) { device_printf(sc->dev, "Overlapping ranges" " for prefetch and non-prefetch memory\n"); return; } } } if (type == SYS_RES_MEMORY) { cbb_cardbus_mem_open(sc->dev, 0, starts[0], ends[0]); cbb_cardbus_mem_open(sc->dev, 
1, starts[1], ends[1]); reg = pci_read_config(sc->dev, CBBR_BRIDGECTRL, 2); reg &= ~(CBBM_BRIDGECTRL_PREFETCH_0 | CBBM_BRIDGECTRL_PREFETCH_1); if (starts[1] != START_NONE) reg |= CBBM_BRIDGECTRL_PREFETCH_1; pci_write_config(sc->dev, CBBR_BRIDGECTRL, reg, 2); if (bootverbose) { device_printf(sc->dev, "Opening memory:\n"); if (starts[0] != START_NONE) device_printf(sc->dev, "Normal: %#x-%#x\n", starts[0], ends[0]); if (starts[1] != START_NONE) device_printf(sc->dev, "Prefetch: %#x-%#x\n", starts[1], ends[1]); } } else if (type == SYS_RES_IOPORT) { cbb_cardbus_io_open(sc->dev, 0, starts[0], ends[0]); cbb_cardbus_io_open(sc->dev, 1, starts[1], ends[1]); if (bootverbose && starts[0] != START_NONE) device_printf(sc->dev, "Opening I/O: %#x-%#x\n", starts[0], ends[0]); } } static int -cbb_cardbus_activate_resource(device_t brdev, device_t child, int type, - int rid, struct resource *res) +cbb_cardbus_activate_resource(device_t brdev, device_t child, + struct resource *res) { int ret; ret = BUS_ACTIVATE_RESOURCE(device_get_parent(brdev), child, - type, rid, res); + res); if (ret != 0) return (ret); - cbb_cardbus_auto_open(device_get_softc(brdev), type); + cbb_cardbus_auto_open(device_get_softc(brdev), rman_get_type(res)); return (0); } static int -cbb_cardbus_deactivate_resource(device_t brdev, device_t child, int type, - int rid, struct resource *res) +cbb_cardbus_deactivate_resource(device_t brdev, device_t child, + struct resource *res) { int ret; ret = BUS_DEACTIVATE_RESOURCE(device_get_parent(brdev), child, - type, rid, res); + res); if (ret != 0) return (ret); - cbb_cardbus_auto_open(device_get_softc(brdev), type); + cbb_cardbus_auto_open(device_get_softc(brdev), rman_get_type(res)); return (0); } static struct resource * cbb_cardbus_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct cbb_softc *sc = device_get_softc(brdev); int tmp; struct resource *res; rman_res_t align; switch (type) { case SYS_RES_IRQ: tmp = rman_get_start(sc->irq_res); if (start > tmp || end < tmp || count != 1) { device_printf(child, "requested interrupt %jd-%jd," "count = %jd not supported by cbb\n", start, end, count); return (NULL); } start = end = tmp; flags |= RF_SHAREABLE; break; case SYS_RES_IOPORT: if (start <= cbb_start_32_io) start = cbb_start_32_io; if (end < start) end = start; if (count > (1 << RF_ALIGNMENT(flags))) flags = (flags & ~RF_ALIGNMENT_MASK) | rman_make_alignment_flags(count); break; case SYS_RES_MEMORY: if (start <= cbb_start_mem) start = cbb_start_mem; if (end < start) end = start; if (count < CBB_MEMALIGN) align = CBB_MEMALIGN; else align = count; if (align > (1 << RF_ALIGNMENT(flags))) flags = (flags & ~RF_ALIGNMENT_MASK) | rman_make_alignment_flags(align); break; } res = BUS_ALLOC_RESOURCE(device_get_parent(brdev), child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { printf("cbb alloc res fail type %d rid %x\n", type, *rid); return (NULL); } cbb_insert_res(sc, res, type, *rid); if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res) != 0) { bus_release_resource(child, type, *rid, res); return (NULL); } return (res); } static int cbb_cardbus_release_resource(device_t brdev, device_t child, int type, int rid, struct resource *res) { struct cbb_softc *sc = device_get_softc(brdev); int error; if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error != 0) return (error); } cbb_remove_res(sc, res); return 
(BUS_RELEASE_RESOURCE(device_get_parent(brdev), child, type, rid, res)); } /************************************************************************/ /* PC Card Power Functions */ /************************************************************************/ static int cbb_pcic_power_enable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); int err; DPRINTF(("cbb_pcic_socket_enable:\n")); /* power down/up the socket to reset */ err = cbb_do_power(brdev); if (err) return (err); exca_reset(&sc->exca, child); return (0); } static int cbb_pcic_power_disable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); DPRINTF(("cbb_pcic_socket_disable\n")); /* Turn off the card's interrupt and leave it in reset, wait 10ms */ exca_putb(&sc->exca, EXCA_INTR, 0); pause("cbbP1", hz / 100); /* power down the socket */ cbb_power(brdev, CARD_OFF); exca_putb(&sc->exca, EXCA_PWRCTL, 0); /* wait 300ms until power fails (Tpf). */ pause("cbbP2", hz * 300 / 1000); /* enable CSC interrupts */ exca_putb(&sc->exca, EXCA_INTR, EXCA_INTR_ENABLE); return (0); } /************************************************************************/ /* POWER methods */ /************************************************************************/ int cbb_power_enable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_power_enable_socket(brdev, child)); return (cbb_cardbus_power_enable_socket(brdev, child)); } int cbb_power_disable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_power_disable_socket(brdev, child)); return (cbb_cardbus_power_disable_socket(brdev, child)); } static int -cbb_pcic_activate_resource(device_t brdev, device_t child, int type, int rid, +cbb_pcic_activate_resource(device_t brdev, device_t child, struct resource *res) { struct cbb_softc *sc = device_get_softc(brdev); int error; - error = exca_activate_resource(&sc->exca, child, type, rid, res); + error = exca_activate_resource(&sc->exca, child, res); if (error == 0) - cbb_activate_window(brdev, type); + cbb_activate_window(brdev, rman_get_type(res)); return (error); } static int -cbb_pcic_deactivate_resource(device_t brdev, device_t child, int type, - int rid, struct resource *res) +cbb_pcic_deactivate_resource(device_t brdev, device_t child, + struct resource *res) { struct cbb_softc *sc = device_get_softc(brdev); - return (exca_deactivate_resource(&sc->exca, child, type, rid, res)); + return (exca_deactivate_resource(&sc->exca, child, res)); } static struct resource * cbb_pcic_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res = NULL; struct cbb_softc *sc = device_get_softc(brdev); int align; int tmp; switch (type) { case SYS_RES_MEMORY: if (start < cbb_start_mem) start = cbb_start_mem; if (end < start) end = start; if (count < CBB_MEMALIGN) align = CBB_MEMALIGN; else align = count; if (align > (1 << RF_ALIGNMENT(flags))) flags = (flags & ~RF_ALIGNMENT_MASK) | rman_make_alignment_flags(align); break; case SYS_RES_IOPORT: if (start < cbb_start_16_io) start = cbb_start_16_io; if (end < start) end = start; break; case SYS_RES_IRQ: tmp = rman_get_start(sc->irq_res); if (start > tmp || end < tmp || count != 1) { device_printf(child, "requested interrupt %jd-%jd," "count = %jd not supported by cbb\n", start, end, count); 
return (NULL); } flags |= RF_SHAREABLE; start = end = rman_get_start(sc->irq_res); break; } res = BUS_ALLOC_RESOURCE(device_get_parent(brdev), child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) return (NULL); cbb_insert_res(sc, res, type, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { bus_release_resource(child, type, *rid, res); return (NULL); } } return (res); } static int cbb_pcic_release_resource(device_t brdev, device_t child, int type, int rid, struct resource *res) { struct cbb_softc *sc = device_get_softc(brdev); int error; if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error != 0) return (error); } cbb_remove_res(sc, res); return (BUS_RELEASE_RESOURCE(device_get_parent(brdev), child, type, rid, res)); } /************************************************************************/ /* PC Card methods */ /************************************************************************/ int cbb_pcic_set_res_flags(device_t brdev, device_t child, int type, int rid, u_long flags) { struct cbb_softc *sc = device_get_softc(brdev); struct resource *res; if (type != SYS_RES_MEMORY) return (EINVAL); res = cbb_find_res(sc, type, rid); if (res == NULL) { device_printf(brdev, "set_res_flags: specified rid not found\n"); return (ENOENT); } return (exca_mem_set_flags(&sc->exca, res, flags)); } int cbb_pcic_set_memory_offset(device_t brdev, device_t child, int rid, uint32_t cardaddr, uint32_t *deltap) { struct cbb_softc *sc = device_get_softc(brdev); struct resource *res; res = cbb_find_res(sc, SYS_RES_MEMORY, rid); if (res == NULL) { device_printf(brdev, "set_memory_offset: specified rid not found\n"); return (ENOENT); } return (exca_mem_set_offset(&sc->exca, res, cardaddr, deltap)); } /************************************************************************/ /* BUS Methods */ /************************************************************************/ int -cbb_activate_resource(device_t brdev, device_t child, int type, int rid, - struct resource *r) +cbb_activate_resource(device_t brdev, device_t child, struct resource *r) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) - return (cbb_pcic_activate_resource(brdev, child, type, rid, r)); + return (cbb_pcic_activate_resource(brdev, child, r)); else - return (cbb_cardbus_activate_resource(brdev, child, type, rid, - r)); + return (cbb_cardbus_activate_resource(brdev, child, r)); } int -cbb_deactivate_resource(device_t brdev, device_t child, int type, - int rid, struct resource *r) +cbb_deactivate_resource(device_t brdev, device_t child, struct resource *r) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) - return (cbb_pcic_deactivate_resource(brdev, child, type, - rid, r)); + return (cbb_pcic_deactivate_resource(brdev, child, r)); else - return (cbb_cardbus_deactivate_resource(brdev, child, type, - rid, r)); + return (cbb_cardbus_deactivate_resource(brdev, child, r)); } struct resource * cbb_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_alloc_resource(brdev, child, type, rid, start, end, count, flags)); else return (cbb_cardbus_alloc_resource(brdev, child, type, rid, start, end, count, flags)); } int cbb_release_resource(device_t brdev, device_t child, int type, int rid, struct resource *r) { struct 
cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_release_resource(brdev, child, type, rid, r)); else return (cbb_cardbus_release_resource(brdev, child, type, rid, r)); } int cbb_read_ivar(device_t brdev, device_t child, int which, uintptr_t *result) { struct cbb_softc *sc = device_get_softc(brdev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->domain; return (0); case PCIB_IVAR_BUS: *result = sc->bus.sec; return (0); case EXCA_IVAR_SLOT: *result = 0; return (0); } return (ENOENT); } int cbb_write_ivar(device_t brdev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: return (EINVAL); case EXCA_IVAR_SLOT: return (EINVAL); } return (ENOENT); } int cbb_child_present(device_t parent, device_t child) { struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(parent); uint32_t sockstate; sockstate = cbb_get(sc, CBB_SOCKET_STATE); return (CBB_CARD_PRESENT(sockstate) && sc->cardok); } diff --git a/sys/dev/pccbb/pccbbvar.h b/sys/dev/pccbb/pccbbvar.h index 690f215ee302..5d6529070a12 100644 --- a/sys/dev/pccbb/pccbbvar.h +++ b/sys/dev/pccbb/pccbbvar.h @@ -1,166 +1,166 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000,2001 Jonathan Chen All rights reserved. * Copyright (c) 2003-2004 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Structure definitions for the Cardbus Bridge driver */ struct cbb_intrhand { driver_filter_t *filt; driver_intr_t *intr; void *arg; struct cbb_softc *sc; void *cookie; }; struct cbb_reslist { SLIST_ENTRY(cbb_reslist) link; struct resource *res; int type; int rid; /* note: unlike the regular resource list, there can be * duplicate rid's in the same list. However, the * combination of rid and res->r_dev should be unique. 
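				 * Both the 16-bit and CardBus allocation
				 * paths add entries here via cbb_insert_res(),
				 * and cbb_find_res() looks entries up by
				 * type and rid.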
*/ bus_addr_t cardaddr; /* for 16-bit pccard memory */ }; #define CBB_AUTO_OPEN_SMALLHOLE 0x100 struct cbb_softc { device_t dev; struct exca_softc exca; struct resource *base_res; struct resource *irq_res; void *intrhand; bus_space_tag_t bst; bus_space_handle_t bsh; uint32_t domain; unsigned int pribus; struct pcib_secbus bus; struct mtx mtx; int cardok; u_int32_t flags; #define CBB_16BIT_CARD 0x20000000 #define CBB_KTHREAD_RUNNING 0x40000000 #define CBB_KTHREAD_DONE 0x80000000 int chipset; /* chipset id */ #define CB_UNKNOWN 0 /* NOT Cardbus-PCI bridge */ #define CB_TI113X 1 /* TI PCI1130/1131 */ #define CB_TI12XX 2 /* TI PCI12xx/14xx/44xx/15xx/45xx */ #define CB_TI125X 3 /* TI PCI1250/1251(B)/1450 */ #define CB_RF5C47X 4 /* RICOH RF5C475/476/477 */ #define CB_RF5C46X 5 /* RICOH RF5C465/466/467 */ #define CB_CIRRUS 6 /* Cirrus Logic CLPD683x */ #define CB_TOPIC95 7 /* Toshiba ToPIC95 */ #define CB_TOPIC97 8 /* Toshiba ToPIC97/100 */ #define CB_O2MICRO 9 /* O2Micro chips */ SLIST_HEAD(, cbb_reslist) rl; device_t cbdev; struct proc *event_thread; void (*chipinit)(struct cbb_softc *); int powerintr; struct root_hold_token *sc_root_token; }; /* result of detect_card */ #define CARD_UKN_CARD 0x00 #define CARD_5V_CARD 0x01 #define CARD_3V_CARD 0x02 #define CARD_XV_CARD 0x04 #define CARD_YV_CARD 0x08 /* for power_socket */ #define CARD_VCC(X) (X) #define CARD_VPP_VCC 0xf0 #define CARD_VCCMASK 0xf #define CARD_VCCSHIFT 0 #define XV 2 #define YV 1 #define CARD_OFF (CARD_VCC(0)) extern int cbb_debug; int cbb_activate_resource(device_t brdev, device_t child, - int type, int rid, struct resource *r); + struct resource *r); struct resource *cbb_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); void cbb_child_detached(device_t brdev, device_t child); int cbb_child_present(device_t parent, device_t child); int cbb_deactivate_resource(device_t brdev, device_t child, - int type, int rid, struct resource *r); + struct resource *r); int cbb_detach(device_t brdev); void cbb_disable_func_intr(struct cbb_softc *sc); void cbb_driver_added(device_t brdev, driver_t *driver); void cbb_event_thread(void *arg); int cbb_pcic_set_memory_offset(device_t brdev, device_t child, int rid, uint32_t cardaddr, uint32_t *deltap); int cbb_pcic_set_res_flags(device_t brdev, device_t child, int type, int rid, u_long flags); int cbb_power(device_t brdev, int volts); int cbb_power_enable_socket(device_t brdev, device_t child); int cbb_power_disable_socket(device_t brdev, device_t child); int cbb_read_ivar(device_t brdev, device_t child, int which, uintptr_t *result); int cbb_release_resource(device_t brdev, device_t child, int type, int rid, struct resource *r); int cbb_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); int cbb_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int cbb_write_ivar(device_t brdev, device_t child, int which, uintptr_t value); /* */ static __inline void cbb_set(struct cbb_softc *sc, uint32_t reg, uint32_t val) { bus_space_write_4(sc->bst, sc->bsh, reg, val); } static __inline uint32_t cbb_get(struct cbb_softc *sc, uint32_t reg) { return (bus_space_read_4(sc->bst, sc->bsh, reg)); } static __inline void cbb_setb(struct cbb_softc *sc, uint32_t reg, uint32_t bits) { cbb_set(sc, reg, cbb_get(sc, reg) | bits); } static __inline void cbb_clrb(struct cbb_softc *sc, uint32_t reg, uint32_t bits) { 
cbb_set(sc, reg, cbb_get(sc, reg) & ~bits); } diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c index 8236b8bde41a..2797bded62fd 100644 --- a/sys/dev/pci/pci.c +++ b/sys/dev/pci/pci.c @@ -1,6836 +1,6838 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_acpi.h" #include "opt_iommu.h" #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) #include #endif #include #include #include #include #ifdef PCI_IOV #include #include #endif #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #define PCIR_IS_BIOS(cfg, reg) \ (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \ ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1)) static int pci_has_quirk(uint32_t devid, int quirk); static pci_addr_t pci_mapbase(uint64_t mapreg); static const char *pci_maptype(uint64_t mapreg); static int pci_maprange(uint64_t mapreg); static pci_addr_t pci_rombase(uint64_t mapreg); static int pci_romsize(uint64_t testval); static void pci_fixancient(pcicfgregs *cfg); static int pci_printf(pcicfgregs *cfg, const char *fmt, ...); static int pci_porten(device_t dev); static int pci_memen(device_t dev); static void pci_assign_interrupt(device_t bus, device_t dev, int force_route); static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch); static int pci_probe(device_t dev); static void pci_load_vendor_data(void); static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc); static char *pci_describe_device(device_t dev); static int pci_modevent(module_t mod, int what, void *arg); static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg); static void pci_read_cap(device_t pcib, pcicfgregs *cfg); static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data); #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data); 
#endif static void pci_read_vpd(device_t pcib, pcicfgregs *cfg); static void pci_mask_msix(device_t dev, u_int index); static void pci_unmask_msix(device_t dev, u_int index); static int pci_msi_blacklisted(void); static int pci_msix_blacklisted(void); static void pci_resume_msi(device_t dev); static void pci_resume_msix(device_t dev); static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq); static void pci_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp); static int pci_reset_post(device_t dev, device_t child); static int pci_reset_prepare(device_t dev, device_t child); static int pci_reset_child(device_t dev, device_t child, int flags); static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *rid); static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did); static device_method_t pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pci_probe), DEVMETHOD(device_attach, pci_attach), DEVMETHOD(device_detach, pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, pci_resume), /* Bus interface */ DEVMETHOD(bus_print_child, pci_print_child), DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch), DEVMETHOD(bus_read_ivar, pci_read_ivar), DEVMETHOD(bus_write_ivar, pci_write_ivar), DEVMETHOD(bus_driver_added, pci_driver_added), DEVMETHOD(bus_setup_intr, pci_setup_intr), DEVMETHOD(bus_teardown_intr, pci_teardown_intr), DEVMETHOD(bus_reset_prepare, pci_reset_prepare), DEVMETHOD(bus_reset_post, pci_reset_post), DEVMETHOD(bus_reset_child, pci_reset_child), DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag), DEVMETHOD(bus_get_resource_list,pci_get_resource_list), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, pci_delete_resource), DEVMETHOD(bus_alloc_resource, pci_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, pci_release_resource), DEVMETHOD(bus_activate_resource, pci_activate_resource), DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource), DEVMETHOD(bus_child_deleted, pci_child_deleted), DEVMETHOD(bus_child_detached, pci_child_detached), DEVMETHOD(bus_child_pnpinfo, pci_child_pnpinfo_method), DEVMETHOD(bus_child_location, pci_child_location_method), DEVMETHOD(bus_get_device_path, pci_get_device_path_method), DEVMETHOD(bus_hint_device_unit, pci_hint_device_unit), DEVMETHOD(bus_remap_intr, pci_remap_intr_method), DEVMETHOD(bus_suspend_child, pci_suspend_child), DEVMETHOD(bus_resume_child, pci_resume_child), DEVMETHOD(bus_rescan, pci_rescan_method), /* PCI interface */ DEVMETHOD(pci_read_config, pci_read_config_method), DEVMETHOD(pci_write_config, pci_write_config_method), DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method), DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method), DEVMETHOD(pci_enable_io, pci_enable_io_method), DEVMETHOD(pci_disable_io, pci_disable_io_method), DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method), DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method), DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method), DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method), DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method), DEVMETHOD(pci_find_cap, pci_find_cap_method), DEVMETHOD(pci_find_next_cap, pci_find_next_cap_method), DEVMETHOD(pci_find_extcap, 
pci_find_extcap_method), DEVMETHOD(pci_find_next_extcap, pci_find_next_extcap_method), DEVMETHOD(pci_find_htcap, pci_find_htcap_method), DEVMETHOD(pci_find_next_htcap, pci_find_next_htcap_method), DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method), DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method), DEVMETHOD(pci_enable_msi, pci_enable_msi_method), DEVMETHOD(pci_enable_msix, pci_enable_msix_method), DEVMETHOD(pci_disable_msi, pci_disable_msi_method), DEVMETHOD(pci_remap_msix, pci_remap_msix_method), DEVMETHOD(pci_release_msi, pci_release_msi_method), DEVMETHOD(pci_msi_count, pci_msi_count_method), DEVMETHOD(pci_msix_count, pci_msix_count_method), DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method), DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method), DEVMETHOD(pci_get_id, pci_get_id_method), DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method), DEVMETHOD(pci_child_added, pci_child_added_method), #ifdef PCI_IOV DEVMETHOD(pci_iov_attach, pci_iov_attach_method), DEVMETHOD(pci_iov_detach, pci_iov_detach_method), DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method), #endif DEVMETHOD_END }; DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc)); EARLY_DRIVER_MODULE(pci, pcib, pci_driver, pci_modevent, NULL, BUS_PASS_BUS); MODULE_VERSION(pci, 1); static char *pci_vendordata; static size_t pci_vendordata_size; struct pci_quirk { uint32_t devid; /* Vendor/device of the card */ int type; #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */ #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */ #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */ #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */ #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */ #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */ #define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */ int arg1; int arg2; }; static const struct pci_quirk pci_quirks[] = { /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */ { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 }, { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* As does the Serverworks OSB4 (the SMBus mapping register) */ { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge * or the CMIC-SL (AKA ServerWorks GC_LE). */ { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work on earlier Intel chipsets including * E7500, E7501, E7505, 845, 865, 875/E7210, and 855. */ { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work with devices behind the AMD 8131 HT-PCIX * bridge. */ { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * Some virtualization environments emulate an older chipset * but support MSI just fine. QEMU uses the Intel 82440. */ { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 }, /* * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus * controller depending on SoftPciRst register (PM_IO 0x55 [7]). * It prevents us from attaching hpet(4) when the bit is unset. * Note this quirk only affects SB600 revision A13 and earlier. * For SB600 A21 and later, firmware must set the bit to hide it. * For SB700 and later, it is unused and hardcoded to zero. 
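 * (In the quirk entry below, devid 0x43851002 encodes device 0x4385 /
 * vendor 0x1002, and arg1 0x14 is the config-space offset of BAR1.)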
*/ { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 }, /* * Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have * a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit * of the command register is set. */ { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't * issue MSI interrupts with PCIM_CMD_INTxDIS set either. */ { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */ { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */ { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */ { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */ { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */ { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */ /* * HPE Gen 10 VGA has a memory range that can't be allocated in the * expected place. */ { 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 }, { 0 } }; /* map register information */ #define PCI_MAPMEM 0x01 /* memory map */ #define PCI_MAPMEMP 0x02 /* prefetchable memory map */ #define PCI_MAPPORT 0x04 /* port map */ struct devlist pci_devq; uint32_t pci_generation; uint32_t pci_numdevs = 0; static int pcie_chipset, pcix_chipset; /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PCI bus tuning parameters"); static int pci_enable_io_modes = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN, &pci_enable_io_modes, 1, "Enable I/O and memory bits in the config register. Some BIOSes do not" " enable these bits correctly. We'd like to do this all the time, but" " there are some peripherals that this causes problems with."); static int pci_do_realloc_bars = 1; SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN, &pci_do_realloc_bars, 0, "Attempt to allocate a new range for any BARs whose original " "firmware-assigned ranges fail to allocate during the initial device scan."); static int pci_do_power_nodriver = 0; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN, &pci_do_power_nodriver, 0, "Place a function into D3 state when no driver attaches to it. 0 means" " disable. 1 means conservatively place devices into D3 state. 2 means" " aggressively place devices into D3 state. 
3 means put absolutely" " everything in D3 state."); int pci_do_power_resume = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN, &pci_do_power_resume, 1, "Transition from D3 -> D0 on resume."); int pci_do_power_suspend = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN, &pci_do_power_suspend, 1, "Transition from D0 -> D3 on suspend."); static int pci_do_msi = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1, "Enable support for MSI interrupts"); static int pci_do_msix = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1, "Enable support for MSI-X interrupts"); static int pci_msix_rewrite_table = 0; SYSCTL_INT(_hw_pci, OID_AUTO, msix_rewrite_table, CTLFLAG_RWTUN, &pci_msix_rewrite_table, 0, "Rewrite entire MSI-X table when updating MSI-X entries"); static int pci_honor_msi_blacklist = 1; SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN, &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X"); #if defined(__i386__) || defined(__amd64__) static int pci_usb_takeover = 1; #else static int pci_usb_takeover = 0; #endif SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN, &pci_usb_takeover, 1, "Enable early takeover of USB controllers. Disable this if you depend on" " BIOS emulation of USB devices, that is you use USB devices (like" " keyboard or mouse) but do not load USB drivers"); static int pci_clear_bars; SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0, "Ignore firmware-assigned resources for BARs."); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static int pci_clear_buses; SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0, "Ignore firmware-assigned bus numbers."); #endif static int pci_enable_ari = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari, 0, "Enable support for PCIe Alternative RID Interpretation"); int pci_enable_aspm = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_aspm, CTLFLAG_RDTUN, &pci_enable_aspm, 0, "Enable support for PCIe Active State Power Management"); static int pci_clear_aer_on_attach = 0; SYSCTL_INT(_hw_pci, OID_AUTO, clear_aer_on_attach, CTLFLAG_RWTUN, &pci_clear_aer_on_attach, 0, "Clear port and device AER state on driver attach"); static bool pci_enable_mps_tune = true; SYSCTL_BOOL(_hw_pci, OID_AUTO, enable_mps_tune, CTLFLAG_RWTUN, &pci_enable_mps_tune, 1, "Enable tuning of MPS(maximum payload size)." ); static int pci_has_quirk(uint32_t devid, int quirk) { const struct pci_quirk *q; for (q = &pci_quirks[0]; q->devid; q++) { if (q->devid == devid && q->type == quirk) return (1); } return (0); } /* Find a device_t by bus/slot/function in domain 0 */ device_t pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func) { return (pci_find_dbsf(0, bus, slot, func)); } /* Find a device_t by domain/bus/slot/function */ device_t pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func) { struct pci_devinfo *dinfo = NULL; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.domain == domain) && (dinfo->cfg.bus == bus) && (dinfo->cfg.slot == slot) && (dinfo->cfg.func == func)) { break; } } return (dinfo != NULL ? 
	    dinfo->cfg.dev : NULL);
}

/* Find a device_t by vendor/device ID */
device_t
pci_find_device(uint16_t vendor, uint16_t device)
{
	struct pci_devinfo *dinfo;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		if ((dinfo->cfg.vendor == vendor) &&
		    (dinfo->cfg.device == device)) {
			return (dinfo->cfg.dev);
		}
	}

	return (NULL);
}

device_t
pci_find_class(uint8_t class, uint8_t subclass)
{
	struct pci_devinfo *dinfo;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		if (dinfo->cfg.baseclass == class &&
		    dinfo->cfg.subclass == subclass) {
			return (dinfo->cfg.dev);
		}
	}

	return (NULL);
}

device_t
pci_find_class_from(uint8_t class, uint8_t subclass, device_t from)
{
	struct pci_devinfo *dinfo;
	bool found = false;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		if (from != NULL && found == false) {
			if (from != dinfo->cfg.dev)
				continue;
			found = true;
			continue;
		}
		if (dinfo->cfg.baseclass == class &&
		    dinfo->cfg.subclass == subclass) {
			return (dinfo->cfg.dev);
		}
	}

	return (NULL);
}

static int
pci_printf(pcicfgregs *cfg, const char *fmt, ...)
{
	va_list ap;
	int retval;

	retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
	    cfg->func);
	va_start(ap, fmt);
	retval += vprintf(fmt, ap);
	va_end(ap);
	return (retval);
}

/* return base address of memory or port map */
static pci_addr_t
pci_mapbase(uint64_t mapreg)
{
	if (PCI_BAR_MEM(mapreg))
		return (mapreg & PCIM_BAR_MEM_BASE);
	else
		return (mapreg & PCIM_BAR_IO_BASE);
}

/* return map type of memory or port map */
static const char *
pci_maptype(uint64_t mapreg)
{
	if (PCI_BAR_IO(mapreg))
		return ("I/O Port");
	if (mapreg & PCIM_BAR_MEM_PREFETCH)
		return ("Prefetchable Memory");
	return ("Memory");
}

/* return log2 of map size decoded for memory or port map */
int
pci_mapsize(uint64_t testval)
{
	int ln2size;

	testval = pci_mapbase(testval);
	ln2size = 0;
	if (testval != 0) {
		while ((testval & 1) == 0) {
			ln2size++;
			testval >>= 1;
		}
	}
	return (ln2size);
}

/* return base address of device ROM */
static pci_addr_t
pci_rombase(uint64_t mapreg)
{
	return (mapreg & PCIM_BIOS_ADDR_MASK);
}

/* return log2 of map size decoded for device ROM */
static int
pci_romsize(uint64_t testval)
{
	int ln2size;

	testval = pci_rombase(testval);
	ln2size = 0;
	if (testval != 0) {
		while ((testval & 1) == 0) {
			ln2size++;
			testval >>= 1;
		}
	}
	return (ln2size);
}

/* return log2 of address range supported by map register */
static int
pci_maprange(uint64_t mapreg)
{
	int ln2range = 0;

	if (PCI_BAR_IO(mapreg))
		ln2range = 32;
	else
		switch (mapreg & PCIM_BAR_MEM_TYPE) {
		case PCIM_BAR_MEM_32:
			ln2range = 32;
			break;
		case PCIM_BAR_MEM_1MB:
			ln2range = 20;
			break;
		case PCIM_BAR_MEM_64:
			ln2range = 64;
			break;
		}
	return (ln2range);
}

/* adjust some values from PCI 1.0 devices to match 2.0 standards ...
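 * (pre-2.0 parts may leave the header type field unset, so pci_fixancient()
 * infers PCI-PCI bridges from the class code instead)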
*/ static void pci_fixancient(pcicfgregs *cfg) { if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) return; /* PCI to PCI bridges use header type 1 */ if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) cfg->hdrtype = PCIM_HDRTYPE_BRIDGE; } /* extract header type specific config data */ static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: cfg->subvendor = REG(PCIR_SUBVEND_0, 2); cfg->subdevice = REG(PCIR_SUBDEV_0, 2); cfg->mingnt = REG(PCIR_MINGNT, 1); cfg->maxlat = REG(PCIR_MAXLAT, 1); cfg->nummaps = PCI_MAXMAPS_0; break; case PCIM_HDRTYPE_BRIDGE: cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2); cfg->nummaps = PCI_MAXMAPS_1; break; case PCIM_HDRTYPE_CARDBUS: cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2); cfg->subvendor = REG(PCIR_SUBVEND_2, 2); cfg->subdevice = REG(PCIR_SUBDEV_2, 2); cfg->nummaps = PCI_MAXMAPS_2; break; } #undef REG } /* read configuration header into pcicfgregs structure */ struct pci_devinfo * pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) uint16_t vid, did; vid = REG(PCIR_VENDOR, 2); if (vid == PCIV_INVALID) return (NULL); did = REG(PCIR_DEVICE, 2); return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did)); } struct pci_devinfo * pci_alloc_devinfo_method(device_t dev) { return (malloc(sizeof(struct pci_devinfo), M_DEVBUF, M_WAITOK | M_ZERO)); } static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did) { struct pci_devinfo *devlist_entry; pcicfgregs *cfg; devlist_entry = PCI_ALLOC_DEVINFO(bus); cfg = &devlist_entry->cfg; cfg->domain = d; cfg->bus = b; cfg->slot = s; cfg->func = f; cfg->vendor = vid; cfg->device = did; cfg->cmdreg = REG(PCIR_COMMAND, 2); cfg->statreg = REG(PCIR_STATUS, 2); cfg->baseclass = REG(PCIR_CLASS, 1); cfg->subclass = REG(PCIR_SUBCLASS, 1); cfg->progif = REG(PCIR_PROGIF, 1); cfg->revid = REG(PCIR_REVID, 1); cfg->hdrtype = REG(PCIR_HDRTYPE, 1); cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); cfg->lattimer = REG(PCIR_LATTIMER, 1); cfg->intpin = REG(PCIR_INTPIN, 1); cfg->intline = REG(PCIR_INTLINE, 1); cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; cfg->hdrtype &= ~PCIM_MFDEV; STAILQ_INIT(&cfg->maps); cfg->iov = NULL; pci_fixancient(cfg); pci_hdrtypedata(pcib, b, s, f, cfg); if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) pci_read_cap(pcib, cfg); STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links); devlist_entry->conf.pc_sel.pc_domain = cfg->domain; devlist_entry->conf.pc_sel.pc_bus = cfg->bus; devlist_entry->conf.pc_sel.pc_dev = cfg->slot; devlist_entry->conf.pc_sel.pc_func = cfg->func; devlist_entry->conf.pc_hdr = cfg->hdrtype; devlist_entry->conf.pc_subvendor = cfg->subvendor; devlist_entry->conf.pc_subdevice = cfg->subdevice; devlist_entry->conf.pc_vendor = cfg->vendor; devlist_entry->conf.pc_device = cfg->device; devlist_entry->conf.pc_class = cfg->baseclass; devlist_entry->conf.pc_subclass = cfg->subclass; devlist_entry->conf.pc_progif = 
	    cfg->progif;
	devlist_entry->conf.pc_revid = cfg->revid;

	pci_numdevs++;
	pci_generation++;

	return (devlist_entry);
}
#undef REG

static void
pci_ea_fill_info(device_t pcib, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \
    cfg->ea.ea_location + (n), w)
	int num_ent;
	int ptr;
	int a, b;
	uint32_t val;
	int ent_size;
	uint32_t dw[4];
	uint64_t base, max_offset;
	struct pci_ea_entry *eae;

	if (cfg->ea.ea_location == 0)
		return;

	STAILQ_INIT(&cfg->ea.ea_entries);

	/* Determine the number of entries */
	num_ent = REG(PCIR_EA_NUM_ENT, 2);
	num_ent &= PCIM_EA_NUM_ENT_MASK;

	/* Find the first entry to take care of */
	ptr = PCIR_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE)
		ptr += 4;

	for (a = 0; a < num_ent; a++) {
		eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO);
		eae->eae_cfg_offset = cfg->ea.ea_location + ptr;

		/* Read a number of dwords in the entry */
		val = REG(ptr, 4);
		ptr += 4;
		ent_size = (val & PCIM_EA_ES);

		for (b = 0; b < ent_size; b++) {
			dw[b] = REG(ptr, 4);
			ptr += 4;
		}

		eae->eae_flags = val;
		eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET;

		base = dw[0] & PCIM_EA_FIELD_MASK;
		max_offset = dw[1] | ~PCIM_EA_FIELD_MASK;

		b = 2;
		if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) {
			base |= (uint64_t)dw[b] << 32UL;
			b++;
		}
		if (((dw[1] & PCIM_EA_IS_64) != 0) && (b < ent_size)) {
			max_offset |= (uint64_t)dw[b] << 32UL;
			b++;
		}

		eae->eae_base = base;
		eae->eae_max_offset = max_offset;

		STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link);

		if (bootverbose) {
			printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n",
			    cfg->vendor, cfg->device, eae->eae_bei,
			    eae->eae_flags, (uintmax_t)eae->eae_base,
			    (uintmax_t)eae->eae_max_offset);
		}
	}
}
#undef REG

static void
pci_read_cap(device_t pcib, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define	WREG(n, v, w)	PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
	uint64_t addr;
#endif
	uint32_t val;
	int ptr, nextptr, ptrptr;

	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
	case PCIM_HDRTYPE_BRIDGE:
		ptrptr = PCIR_CAP_PTR;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		ptrptr = PCIR_CAP_PTR_2;	/* cardbus capabilities ptr */
		break;
	default:
		return;		/* no extended capabilities support */
	}
	nextptr = REG(ptrptr, 1);	/* sanity check? */

	/*
	 * Read capability entries.
	 */
	while (nextptr != 0) {
		/* Sanity check */
		if (nextptr > 255) {
			printf("illegal PCI extended capability offset %d\n",
			    nextptr);
			return;
		}
		/* Find the next entry */
		ptr = nextptr;
		nextptr = REG(ptr + PCICAP_NEXTPTR, 1);

		/* Process this entry */
		switch (REG(ptr + PCICAP_ID, 1)) {
		case PCIY_PMG:		/* PCI power management */
			if (cfg->pp.pp_cap == 0) {
				cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
				cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
				cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
				if ((nextptr - ptr) > PCIR_POWER_DATA)
					cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
			}
			break;
		case PCIY_HT:		/* HyperTransport */
			/* Determine HT-specific capability type. */
			val = REG(ptr + PCIR_HT_COMMAND, 2);

			if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
				cfg->ht.ht_slave = ptr;

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
			switch (val & PCIM_HTCMD_CAP_MASK) {
			case PCIM_HTCAP_MSI_MAPPING:
				if (!(val & PCIM_HTCMD_MSI_FIXED)) {
					/* Sanity check the mapping window.
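					 * A non-fixed mapping window is
					 * expected to point at the
					 * architectural MSI address range
					 * (MSI_INTEL_ADDR_BASE); warn if
					 * firmware moved it elsewhere.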
*/ addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4); addr <<= 32; addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4); if (addr != MSI_INTEL_ADDR_BASE) device_printf(pcib, "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", cfg->domain, cfg->bus, cfg->slot, cfg->func, (long long)addr); } else addr = MSI_INTEL_ADDR_BASE; cfg->ht.ht_msimap = ptr; cfg->ht.ht_msictrl = val; cfg->ht.ht_msiaddr = addr; break; } #endif break; case PCIY_MSI: /* PCI MSI */ cfg->msi.msi_location = ptr; cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl & PCIM_MSICTRL_MMC_MASK)>>1); break; case PCIY_MSIX: /* PCI MSI-X */ cfg->msix.msix_location = ptr; cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1; val = REG(ptr + PCIR_MSIX_TABLE, 4); cfg->msix.msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; val = REG(ptr + PCIR_MSIX_PBA, 4); cfg->msix.msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; break; case PCIY_VPD: /* PCI Vital Product Data */ cfg->vpd.vpd_reg = ptr; break; case PCIY_SUBVENDOR: /* Should always be true. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) { val = REG(ptr + PCIR_SUBVENDCAP_ID, 4); cfg->subvendor = val & 0xffff; cfg->subdevice = val >> 16; } break; case PCIY_PCIX: /* PCI-X */ /* * Assume we have a PCI-X chipset if we have * at least one PCI-PCI bridge with a PCI-X * capability. Note that some systems with * PCI-express or HT chipsets might match on * this check as well. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) pcix_chipset = 1; cfg->pcix.pcix_location = ptr; break; case PCIY_EXPRESS: /* PCI-express */ /* * Assume we have a PCI-express chipset if we have * at least one PCI-express device. */ pcie_chipset = 1; cfg->pcie.pcie_location = ptr; val = REG(ptr + PCIER_FLAGS, 2); cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE; break; case PCIY_EA: /* Enhanced Allocation */ cfg->ea.ea_location = ptr; pci_ea_fill_info(pcib, cfg); break; default: break; } } #if defined(__powerpc__) /* * Enable the MSI mapping window for all HyperTransport * slaves. PCI-PCI bridges have their windows enabled via * PCIB_MAP_MSI(). 
 */
	if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
	    !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
		device_printf(pcib,
		    "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
		    2);
	}
#endif
	/* REG and WREG usage carries through to the next functions */
}

/*
 * PCI Vital Product Data
 */

#define	PCI_VPD_TIMEOUT	1000000

static int
pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);

	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}
	*data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));

	return (0);
}

#if 0
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);

	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif
#undef PCI_VPD_TIMEOUT

struct vpd_readstate {
	device_t	pcib;
	pcicfgregs	*cfg;
	uint32_t	val;
	int		bytesinval;
	int		off;
	uint8_t		cksum;
};

/* return 0 and one byte in *data if no read error, -1 else */
static int
vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
{
	uint32_t reg;
	uint8_t byte;

	if (vrs->bytesinval == 0) {
		if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
			return (-1);
		vrs->val = le32toh(reg);
		vrs->off += 4;
		byte = vrs->val & 0xff;
		vrs->bytesinval = 3;
	} else {
		vrs->val = vrs->val >> 8;
		byte = vrs->val & 0xff;
		vrs->bytesinval--;
	}

	vrs->cksum += byte;
	*data = byte;
	return (0);
}

/* return 0 on match, -1 and "unget" byte on no match */
static int
vpd_expectbyte(struct vpd_readstate *vrs, uint8_t expected)
{
	uint8_t data;

	if (vpd_nextbyte(vrs, &data) != 0)
		return (-1);

	if (data == expected)
		return (0);

	vrs->cksum -= data;
	vrs->val = (vrs->val << 8) + data;
	vrs->bytesinval++;
	return (-1);
}

/* return size if tag matches, -1 on no match, -2 on read error */
static int
vpd_read_tag_size(struct vpd_readstate *vrs, uint8_t vpd_tag)
{
	uint8_t byte1, byte2;

	if (vpd_expectbyte(vrs, vpd_tag) != 0)
		return (-1);

	if ((vpd_tag & 0x80) == 0)
		return (vpd_tag & 0x07);

	if (vpd_nextbyte(vrs, &byte1) != 0)
		return (-2);
	if (vpd_nextbyte(vrs, &byte2) != 0)
		return (-2);

	return ((byte2 << 8) + byte1);
}

/* (re)allocate buffer in multiples of 8 elements */
static void *
alloc_buffer(void *buffer, size_t element_size, int needed)
{
	int alloc, new_alloc;

	alloc = roundup2(needed, 8);
	new_alloc = roundup2(needed + 1, 8);
	if (alloc != new_alloc) {
		buffer = reallocf(buffer,
		    new_alloc * element_size, M_DEVBUF, M_WAITOK | M_ZERO);
	}

	return (buffer);
}

/* read VPD keyword and return element size, return -1 on read error */
static int
vpd_read_elem_head(struct vpd_readstate *vrs, char keyword[2])
{
	uint8_t data;

	if (vpd_nextbyte(vrs, &keyword[0]) != 0)
		return (-1);
	if (vpd_nextbyte(vrs, &keyword[1]) != 0)
		return (-1);
	if (vpd_nextbyte(vrs, &data) != 0)
		return (-1);

	return (data);
}

/* read VPD data element of given size into allocated buffer */
static char *
vpd_read_value(struct vpd_readstate *vrs, int size)
{
	int i;
	char char1;
	char *value;

	value = malloc(size + 1,
M_DEVBUF, M_WAITOK); for (i = 0; i < size; i++) { if (vpd_nextbyte(vrs, &char1) != 0) { free(value, M_DEVBUF); return (NULL); } value[i] = char1; } value[size] = '\0'; return (value); } /* read VPD into *keyword and *value, return length of data element */ static int vpd_read_elem_data(struct vpd_readstate *vrs, char keyword[2], char **value, int maxlen) { int len; len = vpd_read_elem_head(vrs, keyword); if (len > maxlen) return (-1); *value = vpd_read_value(vrs, len); return (len); } /* subtract all data following first byte from checksum of RV element */ static void vpd_fixup_cksum(struct vpd_readstate *vrs, char *rvstring, int len) { int i; uint8_t fixup; fixup = 0; for (i = 1; i < len; i++) fixup += rvstring[i]; vrs->cksum -= fixup; } /* fetch one read-only element and return size of heading + data */ static size_t next_vpd_ro_elem(struct vpd_readstate *vrs, int maxsize) { struct pcicfg_vpd *vpd; pcicfgregs *cfg; struct vpd_readonly *vpd_ros; int len; cfg = vrs->cfg; vpd = &cfg->vpd; if (maxsize < 3) return (-1); vpd->vpd_ros = alloc_buffer(vpd->vpd_ros, sizeof(*vpd->vpd_ros), vpd->vpd_rocnt); vpd_ros = &vpd->vpd_ros[vpd->vpd_rocnt]; maxsize -= 3; len = vpd_read_elem_data(vrs, vpd_ros->keyword, &vpd_ros->value, maxsize); if (vpd_ros->value == NULL) return (-1); vpd_ros->len = len; if (vpd_ros->keyword[0] == 'R' && vpd_ros->keyword[1] == 'V') { vpd_fixup_cksum(vrs, vpd_ros->value, len); if (vrs->cksum != 0) { pci_printf(cfg, "invalid VPD checksum %#hhx\n", vrs->cksum); return (-1); } } vpd->vpd_rocnt++; return (len + 3); } /* fetch one writable element and return size of heading + data */ static size_t next_vpd_rw_elem(struct vpd_readstate *vrs, int maxsize) { struct pcicfg_vpd *vpd; pcicfgregs *cfg; struct vpd_write *vpd_w; int len; cfg = vrs->cfg; vpd = &cfg->vpd; if (maxsize < 3) return (-1); vpd->vpd_w = alloc_buffer(vpd->vpd_w, sizeof(*vpd->vpd_w), vpd->vpd_wcnt); if (vpd->vpd_w == NULL) { pci_printf(cfg, "out of memory"); return (-1); } vpd_w = &vpd->vpd_w[vpd->vpd_wcnt]; maxsize -= 3; vpd_w->start = vrs->off + 3 - vrs->bytesinval; len = vpd_read_elem_data(vrs, vpd_w->keyword, &vpd_w->value, maxsize); if (vpd_w->value == NULL) return (-1); vpd_w->len = len; vpd->vpd_wcnt++; return (len + 3); } /* free all memory allocated for VPD data */ static void vpd_free(struct pcicfg_vpd *vpd) { int i; free(vpd->vpd_ident, M_DEVBUF); for (i = 0; i < vpd->vpd_rocnt; i++) free(vpd->vpd_ros[i].value, M_DEVBUF); free(vpd->vpd_ros, M_DEVBUF); vpd->vpd_rocnt = 0; for (i = 0; i < vpd->vpd_wcnt; i++) free(vpd->vpd_w[i].value, M_DEVBUF); free(vpd->vpd_w, M_DEVBUF); vpd->vpd_wcnt = 0; } #define VPD_TAG_END ((0x0f << 3) | 0) /* small tag, len == 0 */ #define VPD_TAG_IDENT (0x02 | 0x80) /* large tag */ #define VPD_TAG_RO (0x10 | 0x80) /* large tag */ #define VPD_TAG_RW (0x11 | 0x80) /* large tag */ static int pci_parse_vpd(device_t pcib, pcicfgregs *cfg) { struct vpd_readstate vrs; int cksumvalid; int size, elem_size; /* init vpd reader */ vrs.bytesinval = 0; vrs.off = 0; vrs.pcib = pcib; vrs.cfg = cfg; vrs.cksum = 0; /* read VPD ident element - mandatory */ size = vpd_read_tag_size(&vrs, VPD_TAG_IDENT); if (size <= 0) { pci_printf(cfg, "no VPD ident found\n"); return (0); } cfg->vpd.vpd_ident = vpd_read_value(&vrs, size); if (cfg->vpd.vpd_ident == NULL) { pci_printf(cfg, "error accessing VPD ident data\n"); return (0); } /* read VPD RO elements - mandatory */ size = vpd_read_tag_size(&vrs, VPD_TAG_RO); if (size <= 0) { pci_printf(cfg, "no read-only VPD data found\n"); return (0); } while (size > 0) { 
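		/*
		 * Each iteration consumes one read-only element: a two
		 * byte keyword, a one byte length, and the value itself,
		 * so next_vpd_ro_elem() returns len + 3 on success.
		 */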
elem_size = next_vpd_ro_elem(&vrs, size); if (elem_size < 0) { pci_printf(cfg, "error accessing read-only VPD data\n"); return (-1); } size -= elem_size; } cksumvalid = (vrs.cksum == 0); if (!cksumvalid) return (-1); /* read VPD RW elements - optional */ size = vpd_read_tag_size(&vrs, VPD_TAG_RW); if (size == -2) return (-1); while (size > 0) { elem_size = next_vpd_rw_elem(&vrs, size); if (elem_size < 0) { pci_printf(cfg, "error accessing writeable VPD data\n"); return (-1); } size -= elem_size; } /* read empty END tag - mandatory */ size = vpd_read_tag_size(&vrs, VPD_TAG_END); if (size != 0) { pci_printf(cfg, "No valid VPD end tag found\n"); } return (0); } static void pci_read_vpd(device_t pcib, pcicfgregs *cfg) { int status; status = pci_parse_vpd(pcib, cfg); if (status < 0) vpd_free(&cfg->vpd); cfg->vpd.vpd_cached = 1; #undef REG #undef WREG } int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); *identptr = cfg->vpd.vpd_ident; if (*identptr == NULL) return (ENXIO); return (0); } int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; int i; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); for (i = 0; i < cfg->vpd.vpd_rocnt; i++) if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { *vptr = cfg->vpd.vpd_ros[i].value; return (0); } *vptr = NULL; return (ENXIO); } struct pcicfg_vpd * pci_fetch_vpd_list(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg); return (&cfg->vpd); } /* * Find the requested HyperTransport capability and return the offset * in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg) { int ptr, error; uint16_t val; error = pci_find_cap(child, PCIY_HT, &ptr); if (error) return (error); /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } /* Skip to the next HT capability. */ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; } return (ENOENT); } /* * Find the next requested HyperTransport capability after start and return * the offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { int ptr; uint16_t val; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == PCIY_HT, ("start capability is not HyperTransport capability")); ptr = start; /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { /* Skip to the next HT capability. 
*/ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } } return (ENOENT); } /* * Find the requested capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t status; uint8_t ptr; /* * Check the CAP_LIST bit of the PCI status register first. */ status = pci_read_config(child, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (ENXIO); /* * Determine the start pointer of the capabilities list. */ switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptr = PCIR_CAP_PTR_2; break; default: /* XXX: panic? */ return (ENXIO); /* no extended capabilities support */ } ptr = pci_read_config(child, ptr, 1); /* * Traverse the capabilities list. */ while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the next requested capability after start and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg) { uint8_t ptr; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == capability, ("start capability is not expected capability")); ptr = pci_read_config(child, start + PCICAP_NEXTPTR, 1); while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the requested extended capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. */ if (cfg->pcie.pcie_location == 0) return (ENXIO); ptr = PCIR_EXTCAP; ecap = pci_read_config(child, ptr, 4); if (ecap == 0xffffffff || ecap == 0) return (ENOENT); for (;;) { if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); if (ptr == 0) break; ecap = pci_read_config(child, ptr, 4); } return (ENOENT); } /* * Find the next requested extended capability after start and return the * offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. 
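	 * Extended capabilities live in the 0x100-0xfff range of
	 * configuration space, which exists only on PCI Express.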
	 */
	if (cfg->pcie.pcie_location == 0)
		return (ENXIO);

	ecap = pci_read_config(child, start, 4);
	KASSERT(PCI_EXTCAP_ID(ecap) == capability,
	    ("start extended capability is not expected capability"));
	ptr = PCI_EXTCAP_NEXTPTR(ecap);
	while (ptr != 0) {
		ecap = pci_read_config(child, ptr, 4);
		if (PCI_EXTCAP_ID(ecap) == capability) {
			if (capreg != NULL)
				*capreg = ptr;
			return (0);
		}
		ptr = PCI_EXTCAP_NEXTPTR(ecap);
	}

	return (ENOENT);
}

/*
 * Support for MSI-X message interrupts.
 */
static void
pci_write_msix_entry(device_t dev, u_int index, uint64_t address,
    uint32_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16;
	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
	bus_write_4(msix->msix_table_res, offset + 8, data);
}

void
pci_enable_msix_method(device_t dev, device_t child, u_int index,
    uint64_t address, uint32_t data)
{

	if (pci_msix_rewrite_table) {
		struct pci_devinfo *dinfo = device_get_ivars(child);
		struct pcicfg_msix *msix = &dinfo->cfg.msix;

		/*
		 * Some VM hosts require MSI-X to be disabled in the
		 * control register before updates to the MSI-X table
		 * entries are allowed.  It is not enough to disable
		 * MSI-X only while updating a single entry; it must be
		 * disabled while updating all entries in the table.
		 */
		pci_write_config(child,
		    msix->msix_location + PCIR_MSIX_CTRL,
		    msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2);
		pci_resume_msix(child);
	} else
		pci_write_msix_entry(child, index, address, data);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(child, address);
}

void
pci_mask_msix(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;

	KASSERT(msix->msix_msgnum > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	val |= PCIM_MSIX_VCTRL_MASK;

	/*
	 * Some devices (e.g. Samsung PM961) do not support reads of this
	 * register, so always write the new value.
	 */
	bus_write_4(msix->msix_table_res, offset, val);
}

void
pci_unmask_msix(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	val &= ~PCIM_MSIX_VCTRL_MASK;

	/*
	 * Some devices (e.g. Samsung PM961) do not support reads of this
	 * register, so always write the new value.
	 */
	bus_write_4(msix->msix_table_res, offset, val);
}

int
pci_pending_msix(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, bit;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_pba_offset + (index / 32) * 4;
	bit = 1 << index % 32;
	return (bus_read_4(msix->msix_pba_res, offset) & bit);
}

/*
 * Restore MSI-X registers and table during resume.  If MSI-X is
 * enabled then walk the virtual table to restore the actual MSI-X
 * table.
 */
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	int i;

	if (msix->msix_alloc > 0) {
		/* First, mask all vectors.
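		 * A masked vector cannot fire, so any stale address/data
		 * pairs left from before suspend are harmless while the
		 * table is rewritten below.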
*/ for (i = 0; i < msix->msix_msgnum; i++) pci_mask_msix(dev, i); /* Second, program any messages with at least one handler. */ for (i = 0; i < msix->msix_table_len; i++) { mte = &msix->msix_table[i]; if (mte->mte_vector == 0 || mte->mte_handlers == 0) continue; mv = &msix->msix_vectors[mte->mte_vector - 1]; pci_write_msix_entry(dev, i, mv->mv_address, mv->mv_data); pci_unmask_msix(dev, i); } } pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); } /* * Attempt to allocate *count MSI-X messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at rid 1. */ int pci_alloc_msix_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irq, max; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI-X is blacklisted for this system, fail. */ if (pci_msix_blacklisted()) return (ENXIO); /* MSI-X capability present? */ if (cfg->msix.msix_location == 0 || !pci_do_msix) return (ENODEV); /* Make sure the appropriate BARs are mapped. */ rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_table_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); cfg->msix.msix_table_res = rle->res; if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) { rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_pba_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); } cfg->msix.msix_pba_res = rle->res; if (bootverbose) device_printf(child, "attempting to allocate %d MSI-X vectors (%d supported)\n", *count, cfg->msix.msix_msgnum); max = min(*count, cfg->msix.msix_msgnum); for (i = 0; i < max; i++) { /* Allocate a message. */ error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq); if (error) { if (i == 0) return (error); break; } resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } actual = i; if (bootverbose) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1); if (actual == 1) device_printf(child, "using IRQ %ju for MSI-X\n", rle->start); else { int run; /* * Be fancy and try to print contiguous runs of * IRQ values as ranges. 'irq' is the previous IRQ. * 'run' is true if we are in a range. */ device_printf(child, "using IRQs %ju", rle->start); irq = rle->start; run = 0; for (i = 1; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Still in a run? */ if (rle->start == irq + 1) { run = 1; irq++; continue; } /* Finish previous range. */ if (run) { printf("-%d", irq); run = 0; } /* Start new range. */ printf(",%ju", rle->start); irq = rle->start; } /* Unfinished range? */ if (run) printf("-%d", irq); printf(" for MSI-X\n"); } } /* Mask all vectors. */ for (i = 0; i < cfg->msix.msix_msgnum; i++) pci_mask_msix(child, i); /* Allocate and initialize vector data and virtual table. 
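	 * Initially message i is mapped one-to-one to vector i + 1;
	 * pci_remap_msix() may later rearrange or sparsify this mapping.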
	 */
	cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		cfg->msix.msix_table[i].mte_vector = i + 1;
	}

	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
	*count = actual;
	return (0);
}

/*
 * By default, pci_alloc_msix() will assign the allocated IRQ
 * resources consecutively to the first N messages in the MSI-X table.
 * However, device drivers may want to use different layouts if they
 * either receive fewer messages than they asked for, or they wish to
 * populate the MSI-X table sparsely.  This method allows the driver
 * to specify what layout it wants.  It must be called after a
 * successful pci_alloc_msix() but before any of the associated
 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
 *
 * The 'vectors' array contains 'count' message vectors.  The array
 * maps directly to the MSI-X table in that index 0 in the array
 * specifies the vector for the first message in the MSI-X table, etc.
 * The vector value in each array index can either be 0 to indicate
 * that no vector should be assigned to a message slot, or it can be a
 * number from 1 to N (where N is the count returned from a
 * successful call to pci_alloc_msix()) to indicate which message
 * vector (IRQ) is to be used for the corresponding message.
 *
 * On successful return, each message with a non-zero vector will have
 * an associated SYS_RES_IRQ whose rid is equal to the array index +
 * 1.  Additionally, if any of the IRQs allocated via the previous
 * call to pci_alloc_msix() are not used in the mapping, those IRQs
 * will be freed back to the system automatically.
 *
 * For example, suppose a driver has an MSI-X table with 6 messages and
 * asks for 6 messages, but pci_alloc_msix() only returns a count of
 * 3.  Call the three vectors allocated by pci_alloc_msix() A, B, and
 * C.  After the call to pci_alloc_msix(), the device will be set up to
 * have an MSI-X table of ABC--- (where - means no vector assigned).
 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
 * be freed back to the system.  This device will also have valid
 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
 *
 * In any case, the SYS_RES_IRQ rid X will always map to the message
 * at MSI-X table index X - 1 and will only be valid if a vector is
 * assigned to that table entry.
 */
int
pci_remap_msix_method(device_t dev, device_t child, int count,
    const u_int *vectors)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	int i, irq, j, *used;

	/*
	 * Have to have at least one message in the table but the
	 * table can't be bigger than the actual MSI-X table in the
	 * device.
	 */
	if (count == 0 || count > msix->msix_msgnum)
		return (EINVAL);

	/* Sanity check the vectors. */
	for (i = 0; i < count; i++)
		if (vectors[i] > msix->msix_alloc)
			return (EINVAL);

	/*
	 * Make sure there aren't any holes in the vectors to be used.
	 * It's a big pain to support it, and it doesn't really make
	 * sense anyway.
Also, at least one vector must be used. */ used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) if (vectors[i] != 0) used[vectors[i] - 1] = 1; for (i = 0; i < msix->msix_alloc - 1; i++) if (used[i] == 0 && used[i + 1] == 1) { free(used, M_DEVBUF); return (EINVAL); } if (used[0] != 1) { free(used, M_DEVBUF); return (EINVAL); } /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) { free(used, M_DEVBUF); return (EBUSY); } rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) { free(used, M_DEVBUF); return (EBUSY); } } /* Free the existing resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } /* * Build the new virtual table keeping track of which vectors are * used. */ free(msix->msix_table, M_DEVBUF); msix->msix_table = malloc(sizeof(struct msix_table_entry) * count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) msix->msix_table[i].mte_vector = vectors[i]; msix->msix_table_len = count; /* Free any unused IRQs and resize the vectors array if necessary. */ j = msix->msix_alloc - 1; if (used[j] == 0) { struct msix_vector *vec; while (used[j] == 0) { PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[j].mv_irq); j--; } vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF, M_WAITOK); bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * (j + 1)); free(msix->msix_vectors, M_DEVBUF); msix->msix_vectors = vec; msix->msix_alloc = j + 1; } free(used, M_DEVBUF); /* Map the IRQs onto the rids. */ for (i = 0; i < count; i++) { if (vectors[i] == 0) continue; irq = msix->msix_vectors[vectors[i] - 1].mv_irq; resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } if (bootverbose) { device_printf(child, "Remapped MSI-X IRQs as: "); for (i = 0; i < count; i++) { if (i != 0) printf(", "); if (vectors[i] == 0) printf("---"); else printf("%d", msix->msix_vectors[vectors[i] - 1].mv_irq); } printf("\n"); } return (0); } static int pci_release_msix(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i; /* Do we have any messages to release? */ if (msix->msix_alloc == 0) return (ENODEV); /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) return (EBUSY); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) return (EBUSY); } /* Update control register to disable MSI-X. */ msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); /* Free the resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } free(msix->msix_table, M_DEVBUF); msix->msix_table_len = 0; /* Release the IRQs. 
*/ for (i = 0; i < msix->msix_alloc; i++) PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[i].mv_irq); free(msix->msix_vectors, M_DEVBUF); msix->msix_alloc = 0; return (0); } /* * Return the max supported MSI-X messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msix() can return. * Thus, it is subject to the tunables, etc. */ int pci_msix_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_msgnum); return (0); } int pci_msix_pba_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_pba_bar); return (-1); } int pci_msix_table_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_table_bar); return (-1); } /* * HyperTransport MSI mapping control */ void pci_ht_map_msi(device_t dev, uint64_t addr) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_ht *ht = &dinfo->cfg.ht; if (!ht->ht_msimap) return; if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && ht->ht_msiaddr >> 20 == addr >> 20) { /* Enable MSI -> HT mapping. */ ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { /* Disable MSI -> HT mapping. */ ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } } int pci_get_relaxed_ordering_enabled(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_RELAXED_ORD_ENABLE; return (val != 0); } int pci_get_max_payload(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_PAYLOAD; val >>= 5; return (1 << (val + 7)); } int pci_get_max_read_req(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_READ_REQUEST; val >>= 12; return (1 << (val + 7)); } int pci_set_max_read_req(device_t dev, int size) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); if (size < 128) size = 128; if (size > 4096) size = 4096; size = (1 << (fls(size) - 1)); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= ~PCIEM_CTL_MAX_READ_REQUEST; val |= (fls(size) - 8) << 12; pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2); return (size); } uint32_t pcie_read_config(device_t dev, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } return (pci_read_config(dev, cap + reg, width)); } void pcie_write_config(device_t dev, int 
reg, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return; pci_write_config(dev, cap + reg, value, width); } /* * Adjusts a PCI-e capability register by clearing the bits in mask * and setting the bits in (value & mask). Bits not set in mask are * not adjusted. * * Returns the old value on success or all ones on failure. */ uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint32_t old, new; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } old = pci_read_config(dev, cap + reg, width); new = old & ~mask; new |= (value & mask); pci_write_config(dev, cap + reg, new, width); return (old); } /* * Support for MSI message signalled interrupts. */ void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Write data and address values. */ pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data, 2); /* Enable MSI in the control register. */ msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_disable_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Disable MSI -> HT mapping. */ pci_ht_map_msi(child, 0); /* Disable MSI in the control register. */ msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } /* * Restore MSI registers during resume. If MSI is enabled then * restore the data and address registers in addition to the control * register. */ static void pci_resume_msi(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msi *msi = &dinfo->cfg.msi; uint64_t address; uint16_t data; if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { address = msi->msi_addr; data = msi->msi_data; pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data, 2); } pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; int error, i, j; /* * Handle MSI first. We try to find this IRQ among our list * of MSI IRQs. If we find it, we request updated address and * data registers and apply the results. */ if (cfg->msi.msi_alloc > 0) { /* If we don't have any active handlers, nothing to do. 
*/ if (cfg->msi.msi_handlers == 0) return (0); for (i = 0; i < cfg->msi.msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); if (rle->start == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); pci_disable_msi(dev); dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; pci_enable_msi(dev, addr, data); return (0); } } return (ENOENT); } /* * For MSI-X, we check to see if we have this IRQ. If we do, * we request the updated mapping info. If that works, we go * through all the slots that use this IRQ and update them. */ if (cfg->msix.msix_alloc > 0) { bool found = false; for (i = 0; i < cfg->msix.msix_alloc; i++) { mv = &cfg->msix.msix_vectors[i]; if (mv->mv_irq == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); mv->mv_address = addr; mv->mv_data = data; for (j = 0; j < cfg->msix.msix_table_len; j++) { mte = &cfg->msix.msix_table[j]; if (mte->mte_vector != i + 1) continue; if (mte->mte_handlers == 0) continue; pci_mask_msix(dev, j); pci_enable_msix(dev, j, addr, data); pci_unmask_msix(dev, j); } found = true; } } return (found ? 0 : ENOENT); } return (ENOENT); } /* * Returns true if the specified device is blacklisted because MSI * doesn't work. */ int pci_msi_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI)); } /* * Determine if MSI is blacklisted globally on this system. Currently, * we just check for blacklisted chipsets as represented by the * host-PCI bridge at device 0:0:0. In the future, it may become * necessary to check other system attributes, such as the kenv values * that give the motherboard manufacturer and model number. */ static int pci_msi_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ if (!(pcie_chipset || pcix_chipset)) { if (vm_guest != VM_GUEST_NO) { /* * Whitelist older chipsets in virtual * machines known to support MSI. */ dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (!pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_ENABLE_MSI_VM)); } return (1); } dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (pci_msi_device_blacklisted(dev)); return (0); } /* * Returns true if the specified device is blacklisted because MSI-X * doesn't work. Note that this assumes that if MSI doesn't work, * MSI-X doesn't either. */ int pci_msix_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_device_blacklisted(dev)); } /* * Determine if MSI-X is blacklisted globally on this system. If MSI * is blacklisted, assume that MSI-X is as well. Check for additional * chipsets where MSI works but MSI-X does not. */ static int pci_msix_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); dev = pci_find_bsf(0, 0, 0); if (dev != NULL && pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_blacklisted()); } /* * Attempt to allocate *count MSI messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at a rid 1. 
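 *
 * Illustrative driver usage (a sketch, not code from this file):
 *
 *	count = 4;
 *	if (pci_alloc_msi(dev, &count) == 0)
 *		device_printf(dev, "using %d MSI messages\n", count);
 *	else
 *		device_printf(dev, "no MSI, using legacy INTx\n");
 *
 * On success the messages appear as SYS_RES_IRQ resources with rids
 * starting at 1, which the driver then allocates with
 * bus_alloc_resource_any().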
*/ int pci_alloc_msi_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irqs[32]; uint16_t ctrl; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI is blacklisted for this system, fail. */ if (pci_msi_blacklisted()) return (ENXIO); /* MSI capability present? */ if (cfg->msi.msi_location == 0 || !pci_do_msi) return (ENODEV); if (bootverbose) device_printf(child, "attempting to allocate %d MSI vectors (%d supported)\n", *count, cfg->msi.msi_msgnum); /* Don't ask for more than the device supports. */ actual = min(*count, cfg->msi.msi_msgnum); /* Don't ask for more than 32 messages. */ actual = min(actual, 32); /* MSI requires power of 2 number of messages. */ if (!powerof2(actual)) return (EINVAL); for (;;) { /* Try to allocate N messages. */ error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual, actual, irqs); if (error == 0) break; if (actual == 1) return (error); /* Try N / 2. */ actual >>= 1; } /* * We now have N actual messages mapped onto SYS_RES_IRQ * resources in the irqs[] array, so add new resources * starting at rid 1. */ for (i = 0; i < actual; i++) resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irqs[i], irqs[i], 1); if (bootverbose) { if (actual == 1) device_printf(child, "using IRQ %d for MSI\n", irqs[0]); else { int run; /* * Be fancy and try to print contiguous runs * of IRQ values as ranges. 'run' is true if * we are in a range. */ device_printf(child, "using IRQs %d", irqs[0]); run = 0; for (i = 1; i < actual; i++) { /* Still in a run? */ if (irqs[i] == irqs[i - 1] + 1) { run = 1; continue; } /* Finish previous range. */ if (run) { printf("-%d", irqs[i - 1]); run = 0; } /* Start new range. */ printf(",%d", irqs[i]); } /* Unfinished range? */ if (run) printf("-%d", irqs[actual - 1]); printf(" for MSI\n"); } } /* Update control register with actual count. */ ctrl = cfg->msi.msi_ctrl; ctrl &= ~PCIM_MSICTRL_MME_MASK; ctrl |= (ffs(actual) - 1) << 4; cfg->msi.msi_ctrl = ctrl; pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msi.msi_alloc = actual; cfg->msi.msi_handlers = 0; *count = actual; return (0); } /* Release the MSI messages associated with this device. */ int pci_release_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; struct resource_list_entry *rle; int error, i, irqs[32]; /* Try MSI-X first. */ error = pci_release_msix(dev, child); if (error != ENODEV) return (error); /* Do we have any messages to release? */ if (msi->msi_alloc == 0) return (ENODEV); KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); /* Make sure none of the resources are allocated. */ if (msi->msi_handlers > 0) return (EBUSY); for (i = 0; i < msi->msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing MSI resource")); if (rle->res != NULL) return (EBUSY); irqs[i] = rle->start; } /* Update control register with 0 count. 
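	 * That is, clear the MME (multiple message enable) field; the
	 * KASSERT below checks that MSI itself is already disabled.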
	 */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}

/*
 * Return the maximum number of MSI messages this device supports.
 * Basically, assuming the MD code can alloc messages, this function
 * should return the maximum value that pci_alloc_msi() can return.
 * Thus, it is subject to the tunables, etc.
 */
int
pci_msi_count_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	if (pci_do_msi && msi->msi_location != 0)
		return (msi->msi_msgnum);
	return (0);
}

/* free pcicfgregs structure and all dependent data structures */
int
pci_freecfg(struct pci_devinfo *dinfo)
{
	struct devlist *devlist_head;
	struct pci_map *pm, *next;

	devlist_head = &pci_devq;

	if (dinfo->cfg.vpd.vpd_reg)
		vpd_free(&dinfo->cfg.vpd);
	STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
		free(pm, M_DEVBUF);
	}
	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
	free(dinfo, M_DEVBUF);

	/* increment the generation count */
	pci_generation++;

	/* we're losing one device */
	pci_numdevs--;
	return (0);
}

/*
 * PCI power management
 */
int
pci_set_powerstate_method(device_t dev, device_t child, int state)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	uint16_t status;
	int oldstate, highest, delay;

	if (cfg->pp.pp_cap == 0)
		return (EOPNOTSUPP);

	/*
	 * Optimize a no state change request away.  While it would be OK to
	 * write to the hardware in theory, some devices have shown odd
	 * behavior when going from D3 -> D3.
	 */
	oldstate = pci_get_powerstate(child);
	if (oldstate == state)
		return (0);

	/*
	 * The PCI power management specification states that after a state
	 * transition between PCI power states, system software must
	 * guarantee a minimal delay before the function accesses the device.
	 * Compute the worst case delay that we need to guarantee before we
	 * access the device.  Many devices will be responsive much more
	 * quickly than this delay, but there are some that don't respond
	 * instantly to state changes.  Transitions to/from D3 state require
	 * 10ms, while D2 requires 200us, and D0/1 require none.  The delay
	 * is done below with DELAY rather than a sleeper function because
	 * this function can be called from contexts where we cannot sleep.
	 */
	highest = (oldstate > state) ?
oldstate : state; if (highest == PCI_POWERSTATE_D3) delay = 10000; else if (highest == PCI_POWERSTATE_D2) delay = 200; else delay = 0; status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2) & ~PCIM_PSTAT_DMASK; switch (state) { case PCI_POWERSTATE_D0: status |= PCIM_PSTAT_D0; break; case PCI_POWERSTATE_D1: if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D1; break; case PCI_POWERSTATE_D2: if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D2; break; case PCI_POWERSTATE_D3: status |= PCIM_PSTAT_D3; break; default: return (EINVAL); } if (bootverbose) pci_printf(cfg, "Transition from D%d to D%d\n", oldstate, state); PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2); if (delay) DELAY(delay); return (0); } int pci_get_powerstate_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int result; if (cfg->pp.pp_cap != 0) { status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2); switch (status & PCIM_PSTAT_DMASK) { case PCIM_PSTAT_D0: result = PCI_POWERSTATE_D0; break; case PCIM_PSTAT_D1: result = PCI_POWERSTATE_D1; break; case PCIM_PSTAT_D2: result = PCI_POWERSTATE_D2; break; case PCIM_PSTAT_D3: result = PCI_POWERSTATE_D3; break; default: result = PCI_POWERSTATE_UNKNOWN; break; } } else { /* No support, device is always at D0 */ result = PCI_POWERSTATE_D0; } return (result); } /* * Some convenience functions for PCI device drivers. */ static __inline void pci_set_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command |= bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } static __inline void pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command &= ~bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } int pci_enable_busmaster_method(device_t dev, device_t child) { pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_disable_busmaster_method(device_t dev, device_t child) { pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_enable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_set_command_bit(dev, child, bit); return (0); } int pci_disable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_clear_command_bit(dev, child, bit); return (0); } /* * New style pci driver. Parent device is either a pci-host-bridge or a * pci-pci-bridge. Both kinds are represented by instances of pcib. 
*/ void pci_print_verbose(struct pci_devinfo *dinfo) { if (bootverbose) { pcicfgregs *cfg = &dinfo->cfg; printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", cfg->vendor, cfg->device, cfg->revid); printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, cfg->mfdev); printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n", cfg->cmdreg, cfg->statreg, cfg->cachelnsz); printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n", cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); if (cfg->intpin > 0) printf("\tintpin=%c, irq=%d\n", cfg->intpin +'a' -1, cfg->intline); if (cfg->pp.pp_cap) { uint16_t status; status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2); printf("\tpowerspec %d supports D0%s%s D3 current D%d\n", cfg->pp.pp_cap & PCIM_PCAP_SPEC, cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", status & PCIM_PSTAT_DMASK); } if (cfg->msi.msi_location) { int ctrl; ctrl = cfg->msi.msi_ctrl; printf("\tMSI supports %d message%s%s%s\n", cfg->msi.msi_msgnum, (cfg->msi.msi_msgnum == 1) ? "" : "s", (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "", (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":""); } if (cfg->msix.msix_location) { printf("\tMSI-X supports %d message%s ", cfg->msix.msix_msgnum, (cfg->msix.msix_msgnum == 1) ? "" : "s"); if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) printf("in map 0x%x\n", cfg->msix.msix_table_bar); else printf("in maps 0x%x and 0x%x\n", cfg->msix.msix_table_bar, cfg->msix.msix_pba_bar); } } } static int pci_porten(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0; } static int pci_memen(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0; } void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64) { struct pci_devinfo *dinfo; pci_addr_t map, testval; int ln2range; uint16_t cmd; /* * The device ROM BAR is special. It is always a 32-bit * memory BAR. Bit 0 is special and should not be set when * sizing the BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { map = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, 0xfffffffe, 4); testval = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, map, 4); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = 0; return; } map = pci_read_config(dev, reg, 4); ln2range = pci_maprange(map); if (ln2range == 64) map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; /* * Disable decoding via the command register before * determining the BAR's length since we will be placing it in * a weird state. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); /* * Determine the BAR's length by writing all 1's. The bottom * log_2(size) bits of the BAR will stick as 0 when we read * the value back. * * NB: according to the PCI Local Bus Specification, rev. 3.0: * "Software writes 0FFFFFFFFh to both registers, reads them back, * and combines the result into a 64-bit value." (section 6.2.5.1) * * Writes to both registers must be performed before attempting to * read back the size value. 
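	 * For example, writing all ones to a 1 MB memory BAR reads back
	 * with address bits 19:4 forced to zero, from which the size
	 * (1 << 20) is recovered.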
*/ testval = 0; pci_write_config(dev, reg, 0xffffffff, 4); if (ln2range == 64) { pci_write_config(dev, reg + 4, 0xffffffff, 4); testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; } testval |= pci_read_config(dev, reg, 4); /* * Restore the original value of the BAR. We may have reprogrammed * the BAR of the low-level console device and when booting verbose, * we need the console device addressable. */ pci_write_config(dev, reg, map, 4); if (ln2range == 64) pci_write_config(dev, reg + 4, map >> 32, 4); pci_write_config(dev, PCIR_COMMAND, cmd, 2); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = (ln2range == 64); } static void pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base) { struct pci_devinfo *dinfo; int ln2range; /* The device ROM BAR is always a 32-bit memory BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, base, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); if (ln2range == 64) pm->pm_value |= (pci_addr_t)pci_read_config(dev, pm->pm_reg + 4, 4) << 32; } struct pci_map * pci_find_bar(device_t dev, int reg) { struct pci_devinfo *dinfo; struct pci_map *pm; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (pm->pm_reg == reg) return (pm); } return (NULL); } struct pci_map * pci_first_bar(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); return (STAILQ_FIRST(&dinfo->cfg.maps)); } struct pci_map * pci_next_bar(struct pci_map *pm) { return (STAILQ_NEXT(pm, pm_link)); } int pci_bar_enabled(device_t dev, struct pci_map *pm) { struct pci_devinfo *dinfo; uint16_t cmd; dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && !(pm->pm_value & PCIM_BIOS_ENABLE)) return (0); #ifdef PCI_IOV if ((dinfo->cfg.flags & PCICFG_VF) != 0) { struct pcicfg_iov *iov; iov = dinfo->cfg.iov; cmd = pci_read_config(iov->iov_pf, iov->iov_pos + PCIR_SRIOV_CTL, 2); return ((cmd & PCIM_SRIOV_VF_MSE) != 0); } #endif cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) return ((cmd & PCIM_CMD_MEMEN) != 0); else return ((cmd & PCIM_CMD_PORTEN) != 0); } struct pci_map * pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size) { struct pci_devinfo *dinfo; struct pci_map *pm, *prev; dinfo = device_get_ivars(dev); pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO); pm->pm_reg = reg; pm->pm_value = value; pm->pm_size = size; STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", reg)); if (STAILQ_NEXT(prev, pm_link) == NULL || STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) break; } if (prev != NULL) STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); else STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); return (pm); } static void pci_restore_bars(device_t dev) { struct pci_devinfo *dinfo; struct pci_map *pm; int ln2range; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, pm->pm_value >> 32, 4); } } /* * Add a resource based on a pci map register. Return 1 if the map * register is a 32bit map register or 2 if it is a 64bit register. 
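 * The caller uses this count to step over the second dword of a 64-bit
 * BAR when walking the standard BAR registers.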
 */
static int
pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
    int force, int prefetch)
{
	struct pci_map *pm;
	pci_addr_t base, map, testval;
	pci_addr_t start, end, count;
	int barlen, basezero, flags, maprange, mapsize, type;
	uint16_t cmd;
	struct resource *res;

	/*
	 * The BAR may already exist if the device is a CardBus card
	 * whose CIS is stored in this BAR.
	 */
	pm = pci_find_bar(dev, reg);
	if (pm != NULL) {
		maprange = pci_maprange(pm->pm_value);
		barlen = maprange == 64 ? 2 : 1;
		return (barlen);
	}

	pci_read_bar(dev, reg, &map, &testval, NULL);
	if (PCI_BAR_MEM(map)) {
		type = SYS_RES_MEMORY;
		if (map & PCIM_BAR_MEM_PREFETCH)
			prefetch = 1;
	} else
		type = SYS_RES_IOPORT;
	mapsize = pci_mapsize(testval);
	base = pci_mapbase(map);
#ifdef __PCI_BAR_ZERO_VALID
	basezero = 0;
#else
	basezero = base == 0;
#endif
	maprange = pci_maprange(map);
	barlen = maprange == 64 ? 2 : 1;

	/*
	 * For I/O registers, if bottom bit is set, and the next bit up
	 * isn't clear, we know we have a BAR that doesn't conform to the
	 * spec, so ignore it.  Also, sanity check the size of the data
	 * areas to the type of memory involved.  Memory must be at least
	 * 16 bytes in size, while I/O ranges must be at least 4.
	 */
	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
		return (barlen);
	if ((type == SYS_RES_MEMORY && mapsize < 4) ||
	    (type == SYS_RES_IOPORT && mapsize < 2))
		return (barlen);

	/* Save a record of this BAR. */
	pm = pci_add_bar(dev, reg, map, mapsize);
	if (bootverbose) {
		printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
		    reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
		if (type == SYS_RES_IOPORT && !pci_porten(dev))
			printf(", port disabled\n");
		else if (type == SYS_RES_MEMORY && !pci_memen(dev))
			printf(", memory disabled\n");
		else
			printf(", enabled\n");
	}

	/*
	 * If base is 0, then we have problems if this architecture does
	 * not allow that.  It is best to ignore such entries for the
	 * moment.  These will be allocated later if the driver specifically
	 * requests them.  However, some removable buses look better when
	 * all resources are allocated, so allow '0' to be overridden.
	 *
	 * Similarly treat maps whose value is the same as the test value
	 * read back.  These maps have had all f's written to them by the
	 * BIOS in an attempt to disable the resources.
	 */
	if (!force && (basezero || map == testval))
		return (barlen);
	if ((u_long)base != base) {
		device_printf(bus,
		    "pci%d:%d:%d:%d bar %#x too many address bits\n",
		    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
		    pci_get_function(dev), reg);
		return (barlen);
	}

	/*
	 * This code theoretically does the right thing, but has
	 * undesirable side effects in some cases where peripherals
	 * respond oddly to having these bits enabled.  Let the user
	 * be able to turn them off (since pci_enable_io_modes is 1 by
	 * default).
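	 * The corresponding tunable is hw.pci.enable_io_modes.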
	 */
	if (pci_enable_io_modes) {
		/* Turn on resources that have been left off by a lazy BIOS */
		if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
			cmd = pci_read_config(dev, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_PORTEN;
			pci_write_config(dev, PCIR_COMMAND, cmd, 2);
		}
		if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
			cmd = pci_read_config(dev, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_MEMEN;
			pci_write_config(dev, PCIR_COMMAND, cmd, 2);
		}
	} else {
		if (type == SYS_RES_IOPORT && !pci_porten(dev))
			return (barlen);
		if (type == SYS_RES_MEMORY && !pci_memen(dev))
			return (barlen);
	}

	count = (pci_addr_t)1 << mapsize;
	flags = RF_ALIGNMENT_LOG2(mapsize);
	if (prefetch)
		flags |= RF_PREFETCHABLE;
	if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
		start = 0;	/* Let the parent decide. */
		end = ~0;
	} else {
		start = base;
		end = base + count - 1;
	}
	resource_list_add(rl, type, reg, start, end, count);

	/*
	 * Try to allocate the resource for this BAR from our parent
	 * so that this resource range is already reserved.  The
	 * driver for this device will later inherit this resource in
	 * pci_alloc_resource().
	 */
	res = resource_list_reserve(rl, bus, dev, type, &reg, start, end,
	    count, flags);
	if ((pci_do_realloc_bars || pci_has_quirk(pci_get_devid(dev),
	    PCI_QUIRK_REALLOC_BAR)) && res == NULL &&
	    (start != 0 || end != ~0)) {
		/*
		 * If the allocation fails, try to allocate a resource for
		 * this BAR using any available range.  The firmware felt
		 * it was important enough to assign a resource, so don't
		 * disable decoding if we can help it.
		 */
		resource_list_delete(rl, type, reg);
		resource_list_add(rl, type, reg, 0, ~0, count);
		res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0,
		    count, flags);
	}
	if (res == NULL) {
		/*
		 * If the allocation fails, delete the resource list entry
		 * and disable decoding for this device.
		 *
		 * If the driver requests this resource in the future,
		 * pci_reserve_map() will try to allocate a fresh
		 * resource range.
		 */
		resource_list_delete(rl, type, reg);
		pci_disable_io(dev, type);
		if (bootverbose)
			device_printf(bus,
			    "pci%d:%d:%d:%d bar %#x failed to allocate\n",
			    pci_get_domain(dev), pci_get_bus(dev),
			    pci_get_slot(dev), pci_get_function(dev), reg);
	} else {
		start = rman_get_start(res);
		pci_write_bar(dev, pm, start);
	}
	return (barlen);
}

/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sit on the
 * same addresses that old ISA hardware did.  This dictates that we use
 * those addresses and ignore the BARs if we cannot set PCI native
 * addressing mode.
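 *
 * The legacy assignments are 0x1f0-0x1f7 and 0x3f6 for the primary
 * channel and 0x170-0x177 and 0x376 for the secondary one, as reserved
 * below.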
*/ static void pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force, uint32_t prefetchmask) { int rid, type, progif; #if 0 /* if this device supports PCI native addressing use it */ progif = pci_read_config(dev, PCIR_PROGIF, 1); if ((progif & 0x8a) == 0x8a) { if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) && pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) { printf("Trying ATA native PCI addressing mode\n"); pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1); } } #endif progif = pci_read_config(dev, PCIR_PROGIF, 1); type = SYS_RES_IOPORT; if (progif & PCIP_STORAGE_IDE_MODEPRIM) { pci_add_map(bus, dev, PCIR_BAR(0), rl, force, prefetchmask & (1 << 0)); pci_add_map(bus, dev, PCIR_BAR(1), rl, force, prefetchmask & (1 << 1)); } else { rid = PCIR_BAR(0); resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8, 0); rid = PCIR_BAR(1); resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1, 0); } if (progif & PCIP_STORAGE_IDE_MODESEC) { pci_add_map(bus, dev, PCIR_BAR(2), rl, force, prefetchmask & (1 << 2)); pci_add_map(bus, dev, PCIR_BAR(3), rl, force, prefetchmask & (1 << 3)); } else { rid = PCIR_BAR(2); resource_list_add(rl, type, rid, 0x170, 0x177, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170, 0x177, 8, 0); rid = PCIR_BAR(3); resource_list_add(rl, type, rid, 0x376, 0x376, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376, 0x376, 1, 0); } pci_add_map(bus, dev, PCIR_BAR(4), rl, force, prefetchmask & (1 << 4)); pci_add_map(bus, dev, PCIR_BAR(5), rl, force, prefetchmask & (1 << 5)); } static void pci_assign_interrupt(device_t bus, device_t dev, int force_route) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; char tunable_name[64]; int irq; /* Has to have an intpin to have an interrupt. */ if (cfg->intpin == 0) return; /* Let the user override the IRQ with a tunable. */ irq = PCI_INVALID_IRQ; snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.%d.INT%c.irq", cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0)) irq = PCI_INVALID_IRQ; /* * If we didn't get an IRQ via the tunable, then we either use the * IRQ value in the intline register or we ask the bus to route an * interrupt for us. If force_route is true, then we only use the * value in the intline register if the bus was unable to assign an * IRQ. */ if (!PCI_INTERRUPT_VALID(irq)) { if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) irq = PCI_ASSIGN_INTERRUPT(bus, dev); if (!PCI_INTERRUPT_VALID(irq)) irq = cfg->intline; } /* If after all that we don't have an IRQ, just bail. */ if (!PCI_INTERRUPT_VALID(irq)) return; /* Update the config register if it changed. */ if (irq != cfg->intline) { cfg->intline = irq; pci_write_config(dev, PCIR_INTLINE, irq, 1); } /* Add this IRQ as rid 0 interrupt resource. */ resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); } /* Perform early OHCI takeover from SMM. 
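 * If the interrupt routing bit (OHCI_IR) is set, SMM still owns the
 * controller: request an ownership change and fall back to resetting
 * the controller if SMM does not respond.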
*/ static void ohci_early_takeover(device_t self) { struct resource *res; uint32_t ctl; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; ctl = bus_read_4(res, OHCI_CONTROL); if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM active, request owner change\n"); bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR); for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) { DELAY(1000); ctl = bus_read_4(res, OHCI_CONTROL); } if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM does not respond, resetting\n"); bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET); } /* Disable interrupts */ bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early UHCI takeover from SMM. */ static void uhci_early_takeover(device_t self) { struct resource *res; int rid; /* * Set the PIRQD enable bit and switch off all the others. We don't * want legacy support to interfere with us. XXX Does this also mean * that the BIOS won't touch the keyboard anymore if it is connected * to the ports of the root hub? */ pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2); /* Disable interrupts */ rid = PCI_UHCI_BASE_REG; res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res != NULL) { bus_write_2(res, UHCI_INTR, 0); bus_release_resource(self, SYS_RES_IOPORT, rid, res); } } /* Perform early EHCI takeover from SMM. */ static void ehci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, EHCI_HCCPARAMS); /* Synchronise with the BIOS if it owns the controller. */ for (eecp = EHCI_HCC_EECP(cparams); eecp != 0; eecp = EHCI_EECP_NEXT(eec)) { eec = pci_read_config(self, eecp, 4); if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) { continue; } bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); if (bios_sem == 0) { continue; } if (bootverbose) printf("ehci early: " "SMM active, request owner change\n"); pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1); for (i = 0; (i < 100) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); } if (bios_sem != 0) { if (bootverbose) printf("ehci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION)); bus_write_4(res, offs + EHCI_USBINTR, 0); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early XHCI takeover from SMM. */ static void xhci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, XHCI_HCSPARAMS0); eec = -1; /* Synchronise with the BIOS if it owns the controller.
*/ for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec); eecp += XHCI_XECP_NEXT(eec) << 2) { eec = bus_read_4(res, eecp); if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY) continue; bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); if (bios_sem == 0) continue; if (bootverbose) printf("xhci early: " "SMM active, request owner change\n"); bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1); /* wait a maximum of 5 seconds */ for (i = 0; (i < 5000) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); } if (bios_sem != 0) { if (bootverbose) printf("xhci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = bus_read_1(res, XHCI_CAPLENGTH); bus_write_4(res, offs + XHCI_USBCMD, 0); bus_read_4(res, offs + XHCI_USBSTS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static void pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg, struct resource_list *rl) { struct resource *res; char *cp; rman_res_t start, end, count; int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return; } /* * If the existing bus range is valid, attempt to reserve it * from our parent. If this fails for any reason, clear the * secbus and subbus registers. * * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus? * This would at least preserve the existing sec_bus if it is * valid. */ sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1); sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1); /* Quirk handling. */ switch (pci_get_devid(dev)) { case 0x12258086: /* Intel 82454KX/GX (Orion) */ sup_bus = pci_read_config(dev, 0x41, 1); if (sup_bus != 0xff) { sec_bus = sup_bus + 1; sub_bus = sup_bus + 1; PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; case 0x00dd10de: /* Compaq R3000 BIOS sets wrong subordinate bus number. */ if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sub_bus < 0xa) { sub_bus = 0xa; PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; } if (bootverbose) printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus); if (sec_bus > 0 && sub_bus >= sec_bus) { start = sec_bus; end = sub_bus; count = end - start + 1; resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count); /* * If requested, clear secondary bus registers in * bridge devices to force a complete renumbering * rather than reserving the existing range. However, * preserve the existing size.
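 *
 * A worked example of the path below: a bridge read back with
 * secbus=2 and subbus=4 covers count = 4 - 2 + 1 = 3 bus numbers.
 * With pci_clear_buses set, both registers are zeroed and only the
 * wildcard resource list entry added above (start 0, end ~0,
 * count 3) survives, so a later allocation can renumber the range
 * while keeping its original size.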
*/ if (pci_clear_buses) goto clear; rid = 0; res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid, start, end, count, 0); if (res != NULL) return; if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d secbus failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); } clear: PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1); } static struct resource * pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; struct resource *res; int sec_reg, sub_reg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; rl = &dinfo->resources; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return (NULL); } if (*rid != 0) return (NULL); if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL) resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count); if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) { res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, PCI_RES_BUS, *rid); device_printf(child, "allocating %ju bus%s failed\n", count, count == 1 ? "" : "es"); return (NULL); } if (bootverbose) device_printf(child, "Lazy allocation of %ju bus%s at %ju\n", count, count == 1 ? "" : "es", rman_get_start(res)); PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1); PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1); } return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags)); } #endif static int pci_ea_bei_to_rid(device_t dev, int bei) { #ifdef PCI_IOV struct pci_devinfo *dinfo; int iov_pos; struct pcicfg_iov *iov; dinfo = device_get_ivars(dev); iov = dinfo->cfg.iov; if (iov != NULL) iov_pos = iov->iov_pos; else iov_pos = 0; #endif /* Check if matches BAR */ if ((bei >= PCIM_EA_BEI_BAR_0) && (bei <= PCIM_EA_BEI_BAR_5)) return (PCIR_BAR(bei)); /* Check ROM */ if (bei == PCIM_EA_BEI_ROM) return (PCIR_BIOS); #ifdef PCI_IOV /* Check if matches VF_BAR */ if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) && (bei <= PCIM_EA_BEI_VF_BAR_5)) return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) + iov_pos); #endif return (-1); } int pci_ea_is_enabled(device_t dev, int rid) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid) return ((ea->eae_flags & PCIM_EA_ENABLE) > 0); } return (0); } void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; pci_addr_t start, end, count; struct resource_list *rl; int type, flags, rid; struct resource *res; uint32_t tmp; #ifdef PCI_IOV struct pcicfg_iov *iov; #endif dinfo = device_get_ivars(dev); rl = &dinfo->resources; flags = 0; #ifdef PCI_IOV iov = dinfo->cfg.iov; #endif if (dinfo->cfg.ea.ea_location == 0) return; STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { /* * TODO: Ignore an EA BAR if it is not enabled. * Currently the EA implementation supports only * the situation where the EA structure contains * predefined entries. In case they are not enabled, * leave them unallocated and proceed with * the legacy-BAR mechanism.
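 *
 * As a sketch of the decode below: each enabled entry's properties
 * field selects the resource type, e.g. PCIM_EA_P_MEM_PREFETCH maps
 * to SYS_RES_MEMORY with RF_PREFETCHABLE and PCIM_EA_P_IO maps to
 * SYS_RES_IOPORT, while the entry describes the fixed range
 * [eae_base, eae_base + eae_max_offset], eae_max_offset + 1 being
 * the size.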
*/ if ((ea->eae_flags & PCIM_EA_ENABLE) == 0) continue; switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) { case PCIM_EA_P_MEM_PREFETCH: case PCIM_EA_P_VF_MEM_PREFETCH: flags = RF_PREFETCHABLE; /* FALLTHROUGH */ case PCIM_EA_P_VF_MEM: case PCIM_EA_P_MEM: type = SYS_RES_MEMORY; break; case PCIM_EA_P_IO: type = SYS_RES_IOPORT; break; default: continue; } if (alloc_iov != 0) { #ifdef PCI_IOV /* Allocating IOV, confirm BEI matches */ if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5)) continue; #else continue; #endif } else { /* Allocating BAR, confirm BEI matches */ if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_BAR_5)) && (ea->eae_bei != PCIM_EA_BEI_ROM)) continue; } rid = pci_ea_bei_to_rid(dev, ea->eae_bei); if (rid < 0) continue; /* Skip resources already allocated by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL)) continue; start = ea->eae_base; count = ea->eae_max_offset + 1; #ifdef PCI_IOV if (iov != NULL) count = count * iov->iov_num_vfs; #endif end = start + count - 1; if (count == 0) continue; resource_list_add(rl, type, rid, start, end, count); res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count, flags); if (res == NULL) { resource_list_delete(rl, type, rid); /* * Failed to allocate using EA, so disable the entry. * Another allocation attempt will be made later, * but this time using the legacy BAR registers. */ tmp = pci_read_config(dev, ea->eae_cfg_offset, 4); tmp &= ~PCIM_EA_ENABLE; pci_write_config(dev, ea->eae_cfg_offset, tmp, 4); /* * Disabling the entry might fail if it is hardwired. * Read the flags again to match the current status. */ ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4); continue; } /* As per specification, fill BAR with zeros */ pci_write_config(dev, rid, 0, 4); } } void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; const struct pci_quirk *q; uint32_t devid; int i; dinfo = device_get_ivars(dev); cfg = &dinfo->cfg; rl = &dinfo->resources; devid = (cfg->device << 16) | cfg->vendor; /* Allocate resources using Enhanced Allocation */ pci_add_resources_ea(bus, dev, 0); /* ATA devices need special map treatment */ if ((pci_get_class(dev) == PCIC_STORAGE) && (pci_get_subclass(dev) == PCIS_STORAGE_IDE) && ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) || (!pci_read_config(dev, PCIR_BAR(0), 4) && !pci_read_config(dev, PCIR_BAR(2), 4))) ) pci_ata_maps(bus, dev, rl, force, prefetchmask); else for (i = 0; i < cfg->nummaps;) { /* Skip resources already managed by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) || pci_ea_is_enabled(dev, PCIR_BAR(i))) { i++; continue; } /* * Skip quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_UNMAP_REG && q->arg1 == PCIR_BAR(i)) break; if (q->devid != 0) { i++; continue; } i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force, prefetchmask & (1 << i)); } /* * Add additional, quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG) pci_add_map(bus, dev, q->arg1, rl, force, 0); if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) { #ifdef __PCI_REROUTE_INTERRUPT /* * Try to re-route interrupts.
Sometimes the BIOS or * firmware may leave bogus values in these registers. * If the re-route fails, then just stick with what we * have. */ pci_assign_interrupt(bus, dev, 1); #else pci_assign_interrupt(bus, dev, 0); #endif } if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS && pci_get_subclass(dev) == PCIS_SERIALBUS_USB) { if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI) xhci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI) ehci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI) ohci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI) uhci_early_takeover(dev); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) /* * Reserve resources for secondary bus ranges behind bridge * devices. */ pci_reserve_secbus(bus, dev, cfg, rl); #endif } static struct pci_devinfo * pci_identify_function(device_t pcib, device_t dev, int domain, int busno, int slot, int func) { struct pci_devinfo *dinfo; dinfo = pci_read_device(pcib, dev, domain, busno, slot, func); if (dinfo != NULL) pci_add_child(dev, dinfo); return (dinfo); } void pci_add_children(device_t dev, int domain, int busno) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); struct pci_devinfo *dinfo; int maxslots; int s, f, pcifunchigh; uint8_t hdrtype; int first_func; /* * Try to detect a device at slot 0, function 0. If it exists, try to * enable ARI. We must enable ARI before detecting the rest of the * functions on this bus as ARI changes the set of slots and functions * that are legal on this bus. */ dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0); if (dinfo != NULL && pci_enable_ari) PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); /* * Start looking for new devices on slot 0 at function 1 because we * just identified the device at slot 0, function 0. */ first_func = 1; maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++, first_func = 0) { pcifunchigh = 0; f = 0; DELAY(1); /* If function 0 is not present, skip to the next slot. */ if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = first_func; f <= pcifunchigh; f++) pci_identify_function(pcib, dev, domain, busno, s, f); } #undef REG } int pci_rescan_method(device_t dev) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); device_t child, *devlist, *unchanged; int devcount, error, i, j, maxslots, oldcount; int busno, domain, s, f, pcifunchigh; uint8_t hdrtype; /* No need to check for ARI on a rescan. */ error = device_get_children(dev, &devlist, &devcount); if (error) return (error); if (devcount != 0) { unchanged = malloc(devcount * sizeof(device_t), M_TEMP, M_NOWAIT | M_ZERO); if (unchanged == NULL) { free(devlist, M_TEMP); return (ENOMEM); } } else unchanged = NULL; domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { /* If function 0 is not present, skip to the next slot. */ f = 0; if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; pcifunchigh = 0; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = 0; f <= pcifunchigh; f++) { if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; /* * Found a valid function. 
Check if a * device_t for this device already exists. */ for (i = 0; i < devcount; i++) { child = devlist[i]; if (child == NULL) continue; if (pci_get_slot(child) == s && pci_get_function(child) == f) { unchanged[i] = child; goto next_func; } } pci_identify_function(pcib, dev, domain, busno, s, f); next_func:; } } /* Remove devices that are no longer present. */ for (i = 0; i < devcount; i++) { if (unchanged[i] != NULL) continue; device_delete_child(dev, devlist[i]); } free(devlist, M_TEMP); oldcount = devcount; /* Try to attach the devices just added. */ error = device_get_children(dev, &devlist, &devcount); if (error) { free(unchanged, M_TEMP); return (error); } for (i = 0; i < devcount; i++) { for (j = 0; j < oldcount; j++) { if (devlist[i] == unchanged[j]) goto next_device; } device_probe_and_attach(devlist[i]); next_device:; } free(unchanged, M_TEMP); free(devlist, M_TEMP); return (0); #undef REG } #ifdef PCI_IOV device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { struct pci_devinfo *vf_dinfo; device_t pcib; int busno, slot, func; pcib = device_get_parent(bus); PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func); vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno, slot, func, vid, did); vf_dinfo->cfg.flags |= PCICFG_VF; pci_add_child(bus, vf_dinfo); return (vf_dinfo->cfg.dev); } device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { return (pci_add_iov_child(bus, pf, rid, vid, did)); } #endif /* * For a PCIe device, set Max_Payload_Size to match the PCIe root's. */ static void pcie_setup_mps(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); device_t root; uint16_t rmps, mmps, mps; if (dinfo->cfg.pcie.pcie_location == 0) return; root = pci_find_pcie_root_port(dev); if (root == NULL) return; /* Check whether the MPS is already configured. */ rmps = pcie_read_config(root, PCIER_DEVICE_CTL, 2) & PCIEM_CTL_MAX_PAYLOAD; mps = pcie_read_config(dev, PCIER_DEVICE_CTL, 2) & PCIEM_CTL_MAX_PAYLOAD; if (mps == rmps) return; /* Check whether the device is capable of the root's MPS. */ mmps = (pcie_read_config(dev, PCIER_DEVICE_CAP, 2) & PCIEM_CAP_MAX_PAYLOAD) << 5; if (rmps > mmps) { /* * The device is unable to handle the root's MPS. Limit * the root. XXX: We should traverse the whole tree, * applying it to all the devices.
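 *
 * A worked example of the encoding (per the PCIe spec fields): the
 * current MPS in the device control register occupies bits 7:5,
 * while the supported MPS in the capability register occupies
 * bits 2:0, hence the << 5 above to line them up. If the root is
 * set to 256 bytes (field value 1) but the device only supports
 * 128 bytes (field value 0), then rmps > mmps and the write below
 * clamps the root down to 128 bytes.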
*/ pcie_adjust_config(root, PCIER_DEVICE_CTL, PCIEM_CTL_MAX_PAYLOAD, mmps, 2); } else { pcie_adjust_config(dev, PCIER_DEVICE_CTL, PCIEM_CTL_MAX_PAYLOAD, rmps, 2); } } static void pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo) { int aer; uint32_t r; uint16_t r2; if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, 2); r2 &= ~(PCIEM_ROOT_CTL_SERR_CORR | PCIEM_ROOT_CTL_SERR_NONFATAL | PCIEM_ROOT_CTL_SERR_FATAL); pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, r2, 2); } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER UC 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4); r &= ~(PCIM_AER_UC_TRAINING_ERROR | PCIM_AER_UC_DL_PROTOCOL_ERROR | PCIM_AER_UC_SURPRISE_LINK_DOWN | PCIM_AER_UC_POISONED_TLP | PCIM_AER_UC_FC_PROTOCOL_ERROR | PCIM_AER_UC_COMPLETION_TIMEOUT | PCIM_AER_UC_COMPLETER_ABORT | PCIM_AER_UC_UNEXPECTED_COMPLETION | PCIM_AER_UC_RECEIVER_OVERFLOW | PCIM_AER_UC_MALFORMED_TLP | PCIM_AER_UC_ECRC_ERROR | PCIM_AER_UC_UNSUPPORTED_REQUEST | PCIM_AER_UC_ACS_VIOLATION | PCIM_AER_UC_INTERNAL_ERROR | PCIM_AER_UC_MC_BLOCKED_TLP | PCIM_AER_UC_ATOMIC_EGRESS_BLK | PCIM_AER_UC_TLP_PREFIX_BLOCKED); pci_write_config(dev, aer + PCIR_AER_UC_MASK, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER COR 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4); r &= ~(PCIM_AER_COR_RECEIVER_ERROR | PCIM_AER_COR_BAD_TLP | PCIM_AER_COR_BAD_DLLP | PCIM_AER_COR_REPLAY_ROLLOVER | PCIM_AER_COR_REPLAY_TIMEOUT | PCIM_AER_COR_ADVISORY_NF_ERROR | PCIM_AER_COR_INTERNAL_ERROR | PCIM_AER_COR_HEADER_LOG_OVFLOW); pci_write_config(dev, aer + PCIR_AER_COR_MASK, r, 4); r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2); r |= PCIEM_CTL_COR_ENABLE | PCIEM_CTL_NFER_ENABLE | PCIEM_CTL_FER_ENABLE | PCIEM_CTL_URR_ENABLE; pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, r, 2); } } void pci_add_child(device_t bus, struct pci_devinfo *dinfo) { device_t dev; dinfo->cfg.dev = dev = device_add_child(bus, NULL, -1); device_set_ivars(dev, dinfo); resource_list_init(&dinfo->resources); pci_cfg_save(dev, dinfo, 0); pci_cfg_restore(dev, dinfo); pci_print_verbose(dinfo); pci_add_resources(bus, dev, 0, 0); if (pci_enable_mps_tune) pcie_setup_mps(dev); pci_child_added(dinfo->cfg.dev); if (pci_clear_aer_on_attach) pci_add_child_clear_aer(dev, dinfo); EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev); } void pci_child_added_method(device_t dev, device_t child) { } static int pci_probe(device_t dev) { device_set_desc(dev, "PCI bus"); /* Allow other subclasses to override this driver. 
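 * (BUS_PROBE_GENERIC is a low priority, so any subclassed driver
 * whose probe returns a better value, e.g. BUS_PROBE_DEFAULT, wins
 * the attachment instead.)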
*/ return (BUS_PROBE_GENERIC); } int pci_attach_common(device_t dev) { struct pci_softc *sc; int busno, domain; #ifdef PCI_RES_BUS int rid; #endif sc = device_get_softc(dev); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); #ifdef PCI_RES_BUS rid = 0; sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, 1, 0); if (sc->sc_bus == NULL) { device_printf(dev, "failed to allocate bus number\n"); return (ENXIO); } #endif if (bootverbose) device_printf(dev, "domain=%d, physical bus=%d\n", domain, busno); sc->sc_dma_tag = bus_get_dma_tag(dev); return (0); } int pci_attach(device_t dev) { int busno, domain, error; error = pci_attach_common(dev); if (error) return (error); /* * Since there can be multiple independently numbered PCI * buses on systems with multiple PCI domains, we can't use * the unit number to decide which bus we are probing. We ask * the parent pcib what our domain and bus numbers are. */ domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); pci_add_children(dev, domain, busno); return (bus_generic_attach(dev)); } int pci_detach(device_t dev) { #ifdef PCI_RES_BUS struct pci_softc *sc; #endif int error; error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_RES_BUS sc = device_get_softc(dev); error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus); if (error) return (error); #endif return (device_delete_children(dev)); } static void pci_hint_device_unit(device_t dev, device_t child, const char *name, int *unitp) { int line, unit; const char *at; char me1[24], me2[32]; uint8_t b, s, f; uint32_t d; device_location_cache_t *cache; d = pci_get_domain(child); b = pci_get_bus(child); s = pci_get_slot(child); f = pci_get_function(child); snprintf(me1, sizeof(me1), "pci%u:%u:%u", b, s, f); snprintf(me2, sizeof(me2), "pci%u:%u:%u:%u", d, b, s, f); line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { resource_string_value(name, unit, "at", &at); if (strcmp(at, me1) == 0 || strcmp(at, me2) == 0) { *unitp = unit; break; } if (dev_wired_cache_match(cache, child, at)) { *unitp = unit; break; } } dev_wired_cache_fini(cache); } static void pci_set_power_child(device_t dev, device_t child, int state) { device_t pcib; int dstate; /* * Set the device to the given state. If the firmware suggests * a different power state, use it instead. If power management * is not present, the firmware is responsible for managing * device power. Skip children who aren't attached since they * are handled separately. */ pcib = device_get_parent(dev); dstate = state; if (device_is_attached(child) && PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0) pci_set_powerstate(child, dstate); } int pci_suspend_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; int error; dinfo = device_get_ivars(child); /* * Save the PCI configuration space for the child and set the * device in the appropriate power state for this sleep state. */ pci_cfg_save(child, dinfo, 0); /* Suspend devices before potentially powering them down. */ error = bus_generic_suspend_child(dev, child); if (error) return (error); if (pci_do_power_suspend) { /* * Make sure this device's interrupt handler is not invoked * in the case the device uses a shared interrupt that can * be raised by some other device. * This is applicable only to regular (legacy) PCI interrupts * as MSI/MSI-X interrupts are never shared. 
*/ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_suspend_intr(child, rle->res); pci_set_power_child(dev, child, PCI_POWERSTATE_D3); } return (0); } int pci_resume_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; if (pci_do_power_resume) pci_set_power_child(dev, child, PCI_POWERSTATE_D0); dinfo = device_get_ivars(child); pci_cfg_restore(child, dinfo); if (!device_is_attached(child)) pci_cfg_save(child, dinfo, 1); bus_generic_resume_child(dev, child); /* * Allow interrupts only after fully resuming the driver and hardware. */ if (pci_do_power_suspend) { /* See pci_suspend_child for details. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_resume_intr(child, rle->res); } return (0); } int pci_resume(device_t dev) { device_t child, *devlist; int error, i, numdevs; if ((error = device_get_children(dev, &devlist, &numdevs)) != 0) return (error); /* * Resume critical devices first, then everything else later. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: BUS_RESUME_CHILD(dev, child); break; } } for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: break; default: BUS_RESUME_CHILD(dev, child); } } free(devlist, M_TEMP); return (0); } static void pci_load_vendor_data(void) { caddr_t data; void *ptr; size_t sz; data = preload_search_by_type("pci_vendor_data"); if (data != NULL) { ptr = preload_fetch_addr(data); sz = preload_fetch_size(data); if (ptr != NULL && sz != 0) { pci_vendordata = ptr; pci_vendordata_size = sz; /* terminate the database */ pci_vendordata[pci_vendordata_size] = '\n'; } } } void pci_driver_added(device_t dev, driver_t *driver) { int numdevs; device_t *devlist; device_t child; struct pci_devinfo *dinfo; int i; if (bootverbose) device_printf(dev, "driver added\n"); DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs) != 0) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) != DS_NOTPRESENT) continue; dinfo = device_get_ivars(child); pci_print_verbose(dinfo); if (bootverbose) pci_printf(&dinfo->cfg, "reprobing on driver added\n"); pci_cfg_restore(child, dinfo); if (device_probe_and_attach(child) != 0) pci_child_detached(dev, child); } free(devlist, M_TEMP); } int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct pci_devinfo *dinfo; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; void *cookie; int error, rid; error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr, arg, &cookie); if (error) return (error); /* If this is not a direct child, just bail out. */ if (device_get_parent(child) != dev) { *cookiep = cookie; return(0); } rid = rman_get_rid(irq); if (rid == 0) { /* Make sure that INTx is enabled */ pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. * Ask our parent to map the MSI and give * us the address and data register values. * If we fail for some reason, teardown the * interrupt handler. 
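 *
 * By convention rid 0 is the legacy INTx resource, while MSI and
 * MSI-X vectors use 1-based rids; that is why the table lookups
 * below index with rid - 1.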
*/ dinfo = device_get_ivars(child); if (dinfo->cfg.msi.msi_alloc > 0) { if (dinfo->cfg.msi.msi_addr == 0) { KASSERT(dinfo->cfg.msi.msi_handlers == 0, ("MSI has handlers, but vectors not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; } if (dinfo->cfg.msi.msi_handlers == 0) pci_enable_msi(child, dinfo->cfg.msi.msi_addr, dinfo->cfg.msi.msi_data); dinfo->cfg.msi.msi_handlers++; } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; KASSERT(mte->mte_vector != 0, ("no message vector")); mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; KASSERT(mv->mv_irq == rman_get_start(irq), ("IRQ mismatch")); if (mv->mv_address == 0) { KASSERT(mte->mte_handlers == 0, ("MSI-X table entry has handlers, but vector not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; mv->mv_address = addr; mv->mv_data = data; } /* * The MSIX table entry must be made valid by * incrementing the mte_handlers before * calling pci_enable_msix() and * pci_resume_msix(). Else the MSIX rewrite * table quirk will not work as expected. */ mte->mte_handlers++; if (mte->mte_handlers == 1) { pci_enable_msix(child, rid - 1, mv->mv_address, mv->mv_data); pci_unmask_msix(child, rid - 1); } } /* * Make sure that INTx is disabled if we are using MSI/MSI-X, * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG, * in which case we "enable" INTx so MSI/MSI-X actually works. */ if (!pci_has_quirk(pci_get_devid(child), PCI_QUIRK_MSI_INTX_BUG)) pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); else pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); bad: if (error) { (void)bus_generic_teardown_intr(dev, child, irq, cookie); return (error); } } *cookiep = cookie; return (0); } int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct msix_table_entry *mte; struct resource_list_entry *rle; struct pci_devinfo *dinfo; int error, rid; if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) return (EINVAL); /* If this isn't a direct child, just bail out */ if (device_get_parent(child) != dev) return(bus_generic_teardown_intr(dev, child, irq, cookie)); rid = rman_get_rid(irq); if (rid == 0) { /* Mask INTx */ pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. If so, * decrement the appropriate handlers count and mask the * MSI-X message, or disable MSI messages if the count * drops to 0. 
*/ dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); if (rle->res != irq) return (EINVAL); if (dinfo->cfg.msi.msi_alloc > 0) { KASSERT(rid <= dinfo->cfg.msi.msi_alloc, ("MSI-X index too high")); if (dinfo->cfg.msi.msi_handlers == 0) return (EINVAL); dinfo->cfg.msi.msi_handlers--; if (dinfo->cfg.msi.msi_handlers == 0) pci_disable_msi(child); } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; if (mte->mte_handlers == 0) return (EINVAL); mte->mte_handlers--; if (mte->mte_handlers == 0) pci_mask_msix(child, rid - 1); } } error = bus_generic_teardown_intr(dev, child, irq, cookie); if (rid > 0) KASSERT(error == 0, ("%s: generic teardown failed for MSI/MSI-X", __func__)); return (error); } int pci_print_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); retval += printf(" at device %d.%d", pci_get_slot(child), pci_get_function(child)); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static const struct { int class; int subclass; int report; /* 0 = bootverbose, 1 = always */ const char *desc; } pci_nomatch_tab[] = { {PCIC_OLD, -1, 1, "old"}, {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"}, {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"}, {PCIC_STORAGE, -1, 1, "mass storage"}, {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"}, {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"}, {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"}, {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"}, {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"}, {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"}, {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"}, {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"}, {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"}, {PCIC_NETWORK, -1, 1, "network"}, {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"}, {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"}, {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"}, {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"}, {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"}, {PCIC_DISPLAY, -1, 1, "display"}, {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"}, {PCIC_MULTIMEDIA, -1, 1, "multimedia"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"}, {PCIC_MEMORY, -1, 1, "memory"}, {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"}, {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"}, {PCIC_BRIDGE, -1, 1, "bridge"}, {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"}, {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"}, {PCIC_BRIDGE, 
PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"}, {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"}, {PCIC_SIMPLECOMM, -1, 1, "simple comms"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"}, {PCIC_BASEPERIPH, -1, 0, "base peripheral"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"}, {PCIC_INPUTDEV, -1, 1, "input device"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"}, {PCIC_DOCKING, -1, 1, "docking station"}, {PCIC_PROCESSOR, -1, 1, "processor"}, {PCIC_SERIALBUS, -1, 1, "serial bus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"}, {PCIC_WIRELESS, -1, 1, "wireless controller"}, {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"}, {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"}, {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"}, {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"}, {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"}, {PCIC_SATCOM, -1, 1, "satellite communication"}, {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"}, {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"}, {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"}, {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"}, {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"}, {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"}, {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"}, {PCIC_DASP, -1, 0, "dasp"}, {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"}, {PCIC_DASP, PCIS_DASP_PERFCNTRS, 1, "performance counters"}, {PCIC_DASP, PCIS_DASP_COMM_SYNC, 1, "communication synchronizer"}, {PCIC_DASP, PCIS_DASP_MGMT_CARD, 1, "signal processing management"}, {PCIC_INSTRUMENT, -1, 0, "non-essential instrumentation"}, {0, 0, 0, NULL} }; void pci_probe_nomatch(device_t dev, device_t child) { int i, report; const char *cp, *scp; char *device; /* * Look for a listing for this device in a loaded device database. */ report = 1; if ((device = pci_describe_device(child)) != NULL) { device_printf(dev, "<%s>", device); free(device, M_DEVBUF); } else { /* * Scan the class/subclass descriptions for a general * description. */ cp = "unknown"; scp = NULL; for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) { if (pci_nomatch_tab[i].class == pci_get_class(child)) { if (pci_nomatch_tab[i].subclass == -1) { cp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } else if (pci_nomatch_tab[i].subclass == pci_get_subclass(child)) { scp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } } } if (report || bootverbose) { device_printf(dev, "<%s%s%s>", cp ? 
cp : "", ((cp != NULL) && (scp != NULL)) ? ", " : "", scp ? scp : ""); } } if (report || bootverbose) { printf(" at device %d.%d (no driver attached)\n", pci_get_slot(child), pci_get_function(child)); } pci_cfg_save(child, device_get_ivars(child), 1); } void pci_child_detached(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* * Have to deallocate IRQs before releasing any MSI messages and * have to release MSI messages before deallocating any memory * BARs. */ if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n"); if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) { if (dinfo->cfg.msi.msi_alloc != 0) pci_printf(&dinfo->cfg, "Device leaked %d MSI " "vectors\n", dinfo->cfg.msi.msi_alloc); else pci_printf(&dinfo->cfg, "Device leaked %d MSI-X " "vectors\n", dinfo->cfg.msix.msix_alloc); (void)pci_release_msi(child); } if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) pci_printf(&dinfo->cfg, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) pci_printf(&dinfo->cfg, "Device leaked I/O resources\n"); #ifdef PCI_RES_BUS if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0) pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n"); #endif pci_cfg_save(child, dinfo, 1); } /* * Parse the PCI device database, if loaded, and return a pointer to a * description of the device. * * The database is flat text formatted as follows: * * Any line not in a valid format is ignored. * Lines are terminated with newline '\n' characters. * * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then * the vendor name. * * A DEVICE line is entered immediately below the corresponding VENDOR ID. * - devices cannot be listed without a corresponding VENDOR line. * A DEVICE line consists of a TAB, the 4 digit (hex) device code, * another TAB, then the device name. */ /* * Assuming (ptr) points to the beginning of a line in the database, * return the vendor or device and description of the next entry. * The value of (vendor) or (device) inappropriate for the entry type * is set to -1. Returns nonzero at the end of the database. * * Note that this is slightly unrobust in the face of corrupt data; * we attempt to safeguard against this by spamming the end of the * database with a newline when we initialise. */ static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc) { char *cp = *ptr; int left; *device = -1; *vendor = -1; **desc = '\0'; for (;;) { left = pci_vendordata_size - (cp - pci_vendordata); if (left <= 0) { *ptr = cp; return(1); } /* vendor entry? */ if (*cp != '\t' && sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2) break; /* device entry? */ if (*cp == '\t' && sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2) break; /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n') { cp++; left--; } } /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n' && left > 0) cp++; *ptr = cp; return(0); } static char * pci_describe_device(device_t dev) { int vendor, device; char *desc, *vp, *dp, *line; desc = vp = dp = NULL; /* * If we have no vendor data, we can't do anything. 
*/ if (pci_vendordata == NULL) goto out; /* * Scan the vendor data looking for this device */ line = pci_vendordata; if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &vp)) goto out; if (vendor == pci_get_vendor(dev)) break; } if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &dp)) { *dp = 0; break; } if (vendor != -1) { *dp = 0; break; } if (device == pci_get_device(dev)) break; } if (dp[0] == '\0') snprintf(dp, 80, "0x%x", pci_get_device(dev)); if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) != NULL) sprintf(desc, "%s, %s", vp, dp); out: if (vp != NULL) free(vp, M_DEVBUF); if (dp != NULL) free(dp, M_DEVBUF); return(desc); } int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; switch (which) { case PCI_IVAR_ETHADDR: /* * The generic accessor doesn't deal with failure, so * we set the return value, then return an error. */ *((uint8_t **) result) = NULL; return (EINVAL); case PCI_IVAR_SUBVENDOR: *result = cfg->subvendor; break; case PCI_IVAR_SUBDEVICE: *result = cfg->subdevice; break; case PCI_IVAR_VENDOR: *result = cfg->vendor; break; case PCI_IVAR_DEVICE: *result = cfg->device; break; case PCI_IVAR_DEVID: *result = (cfg->device << 16) | cfg->vendor; break; case PCI_IVAR_CLASS: *result = cfg->baseclass; break; case PCI_IVAR_SUBCLASS: *result = cfg->subclass; break; case PCI_IVAR_PROGIF: *result = cfg->progif; break; case PCI_IVAR_REVID: *result = cfg->revid; break; case PCI_IVAR_INTPIN: *result = cfg->intpin; break; case PCI_IVAR_IRQ: *result = cfg->intline; break; case PCI_IVAR_DOMAIN: *result = cfg->domain; break; case PCI_IVAR_BUS: *result = cfg->bus; break; case PCI_IVAR_SLOT: *result = cfg->slot; break; case PCI_IVAR_FUNCTION: *result = cfg->func; break; case PCI_IVAR_CMDREG: *result = cfg->cmdreg; break; case PCI_IVAR_CACHELNSZ: *result = cfg->cachelnsz; break; case PCI_IVAR_MINGNT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->mingnt; break; case PCI_IVAR_MAXLAT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->maxlat; break; case PCI_IVAR_LATTIMER: *result = cfg->lattimer; break; default: return (ENOENT); } return (0); } int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case PCI_IVAR_INTPIN: dinfo->cfg.intpin = value; return (0); case PCI_IVAR_ETHADDR: case PCI_IVAR_SUBVENDOR: case PCI_IVAR_SUBDEVICE: case PCI_IVAR_VENDOR: case PCI_IVAR_DEVICE: case PCI_IVAR_DEVID: case PCI_IVAR_CLASS: case PCI_IVAR_SUBCLASS: case PCI_IVAR_PROGIF: case PCI_IVAR_REVID: case PCI_IVAR_IRQ: case PCI_IVAR_DOMAIN: case PCI_IVAR_BUS: case PCI_IVAR_SLOT: case PCI_IVAR_FUNCTION: return (EINVAL); /* disallow for now */ default: return (ENOENT); } } #include "opt_ddb.h" #ifdef DDB #include <ddb/ddb.h> #include <sys/cons.h> /* * List resources based on pci map registers, for use within ddb */ DB_SHOW_COMMAND_FLAGS(pciregs, db_pci_dump, DB_CMD_MEMSAFE) { struct pci_devinfo *dinfo; struct devlist *devlist_head; struct pci_conf *p; const char *name; int i, error, none_count; none_count = 0; /* get the head of the device queue */ devlist_head = &pci_devq; /* * Go through the list of devices and print out devices */ for (error = 0, i = 0, dinfo = 
STAILQ_FIRST(devlist_head); (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit; dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { /* Populate pd_name and pd_unit */ name = NULL; if (dinfo->cfg.dev) name = device_get_name(dinfo->cfg.dev); p = &dinfo->conf; db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x " "chip=0x%08x rev=0x%02x hdr=0x%02x\n", (name && *name) ? name : "none", (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) : none_count++, p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev, p->pc_sel.pc_func, (p->pc_class << 16) | (p->pc_subclass << 8) | p->pc_progif, (p->pc_subdevice << 16) | p->pc_subvendor, (p->pc_device << 16) | p->pc_vendor, p->pc_revid, p->pc_hdr); } } #endif /* DDB */ struct resource * pci_reserve_map(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int num, u_int flags) { struct pci_devinfo *dinfo = device_get_ivars(child); struct resource_list *rl = &dinfo->resources; struct resource *res; struct pci_map *pm; uint16_t cmd; pci_addr_t map, testval; int mapsize; res = NULL; /* If rid is managed by EA, ignore it */ if (pci_ea_is_enabled(child, *rid)) goto out; pm = pci_find_bar(child, *rid); if (pm != NULL) { /* This is a BAR that we failed to allocate earlier. */ mapsize = pm->pm_size; map = pm->pm_value; } else { /* * Weed out the bogons, and figure out how large the * BAR/map is. BARs that read back 0 here are bogus * and unimplemented. Note: atapci devices in legacy mode * are special and handled elsewhere in the code. If you * have an atapci device in legacy mode and it fails here, * that other code is broken. */ pci_read_bar(child, *rid, &map, &testval, NULL); /* * Determine the size of the BAR and ignore BARs with a size * of 0. Device ROM BARs use a different mask value. */ if (PCIR_IS_BIOS(&dinfo->cfg, *rid)) mapsize = pci_romsize(testval); else mapsize = pci_mapsize(testval); if (mapsize == 0) goto out; pm = pci_add_bar(child, *rid, map, mapsize); } if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) { if (type != SYS_RES_MEMORY) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is a memio\n", device_get_nameunit(child), type, *rid); goto out; } } else { if (type != SYS_RES_IOPORT) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is an ioport\n", device_get_nameunit(child), type, *rid); goto out; } } /* * For real BARs, we need to override the size that * the driver requests, because that's what the BAR * actually uses and we would otherwise have a * situation where we might allocate the excess to * another driver, which won't work. */ count = ((pci_addr_t)1 << mapsize) * num; if (RF_ALIGNMENT(flags) < mapsize) flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize); if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH)) flags |= RF_PREFETCHABLE; /* * Allocate enough resource, and then write back the * appropriate BAR for that resource.
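 *
 * A worked example of the override above: a memory BAR that probes
 * to mapsize 16 decodes a 64KB window, so count becomes
 * 0x10000 * num and the alignment is forced up to 2^16 no matter
 * what the child asked for; reserving anything smaller would let
 * the excess, which the BAR still decodes, be handed to another
 * device.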
*/ resource_list_add(rl, type, *rid, start, end, count); res = resource_list_reserve(rl, dev, child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, type, *rid); device_printf(child, "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n", count, *rid, type, start, end); goto out; } if (bootverbose) device_printf(child, "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n", count, *rid, type, rman_get_start(res)); /* Disable decoding via the CMD register before updating the BAR */ cmd = pci_read_config(child, PCIR_COMMAND, 2); pci_write_config(child, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); map = rman_get_start(res); pci_write_bar(child, pm, map); /* Restore the original value of the CMD register */ pci_write_config(child, PCIR_COMMAND, cmd, 2); out: return (res); } struct resource * pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; struct resource *res; pcicfgregs *cfg; /* * Perform lazy resource allocation */ dinfo = device_get_ivars(child); rl = &dinfo->resources; cfg = &dinfo->cfg; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_alloc_secbus(dev, child, rid, start, end, count, flags)); #endif case SYS_RES_IRQ: /* * Can't alloc legacy interrupt once MSI messages have * been allocated. */ if (*rid == 0 && (cfg->msi.msi_alloc > 0 || cfg->msix.msix_alloc > 0)) return (NULL); /* * If the child device doesn't have an interrupt * routed and is deserving of an interrupt, try to * assign it one. */ if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) && (cfg->intpin != 0)) pci_assign_interrupt(dev, child, 0); break; case SYS_RES_IOPORT: case SYS_RES_MEMORY: #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. * For those allocations just pass the request up the * tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) { switch (*rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: /* * XXX: Should we bother creating a resource * list entry? */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } } #endif /* Reserve resources for this BAR if needed. */ rle = resource_list_find(rl, type, *rid); if (rle == NULL) { res = pci_reserve_map(dev, child, type, rid, start, end, count, num, flags); if (res == NULL) return (NULL); } } return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } struct resource * pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef PCI_IOV struct pci_devinfo *dinfo; #endif if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); #ifdef PCI_IOV dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (NULL); case SYS_RES_MEMORY: return (pci_vf_alloc_mem_resource(dev, child, rid, start, end, count, flags)); } /* Fall through for other types of resource allocations. 
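 * (SR-IOV defines only memory BARs for VFs, which is why the
 * SYS_RES_IOPORT case above simply fails the allocation.)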
*/ } #endif return (pci_alloc_multi_resource(dev, child, type, rid, start, end, count, 1, flags)); } int pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; struct resource_list *rl; pcicfgregs *cfg __unused; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); dinfo = device_get_ivars(child); cfg = &dinfo->cfg; #ifdef PCI_IOV if (cfg->flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (EDOOFUS); case SYS_RES_MEMORY: return (pci_vf_release_mem_resource(dev, child, rid, r)); } /* Fall through for other types of resource allocations. */ } #endif #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. For * those allocations just pass the request up the tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE && (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) { switch (rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: return (bus_generic_release_resource(dev, child, type, rid, r)); } } #endif rl = &dinfo->resources; return (resource_list_release(rl, dev, child, type, rid, r)); } int -pci_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +pci_activate_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; - int error; + int error, rid, type; - error = bus_generic_activate_resource(dev, child, type, rid, r); + error = bus_generic_activate_resource(dev, child, r); if (error) return (error); /* Enable decoding in the command register when activating BARs. */ if (device_get_parent(child) == dev) { /* Device ROMs need their decoding explicitly enabled. */ dinfo = device_get_ivars(child); + rid = rman_get_rid(r); + type = rman_get_type(r); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r) | PCIM_BIOS_ENABLE); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: error = PCI_ENABLE_IO(dev, child, type); break; } } return (error); } int -pci_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +pci_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; - int error; + int error, rid, type; - error = bus_generic_deactivate_resource(dev, child, type, rid, r); + error = bus_generic_deactivate_resource(dev, child, r); if (error) return (error); /* Disable decoding for device ROMs. */ if (device_get_parent(child) == dev) { dinfo = device_get_ivars(child); + rid = rman_get_rid(r); + type = rman_get_type(r); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r)); } return (0); } void pci_child_deleted(device_t dev, device_t child) { struct resource_list_entry *rle; struct resource_list *rl; struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); rl = &dinfo->resources; EVENTHANDLER_INVOKE(pci_delete_device, child); /* Turn off access to resources we're about to free */ if (bus_child_present(child) != 0) { pci_write_config(child, PCIR_COMMAND, pci_read_config(child, PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2); pci_disable_busmaster(child); } /* Free all allocated resources */ STAILQ_FOREACH(rle, rl, link) { if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, rle->type, rle->rid)) { pci_printf(&dinfo->cfg, "Resource still owned, oops. 
" "(type=%d, rid=%d, addr=%lx)\n", rle->type, rle->rid, rman_get_start(rle->res)); bus_release_resource(child, rle->type, rle->rid, rle->res); } resource_list_unreserve(rl, dev, child, rle->type, rle->rid); } } resource_list_free(rl); pci_freecfg(dinfo); } void pci_delete_resource(device_t dev, device_t child, int type, int rid) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return; dinfo = device_get_ivars(child); rl = &dinfo->resources; rle = resource_list_find(rl, type, rid); if (rle == NULL) return; if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, type, rid)) { device_printf(dev, "delete_resource: " "Resource still owned by child, oops. " "(type=%d, rid=%d, addr=%jx)\n", type, rid, rman_get_start(rle->res)); return; } resource_list_unreserve(rl, dev, child, type, rid); } resource_list_delete(rl, type, rid); } struct resource_list * pci_get_resource_list (device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } #ifdef IOMMU bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { bus_dma_tag_t tag; struct pci_softc *sc; if (device_get_parent(dev) == bus) { /* try iommu and return if it works */ tag = iommu_get_dma_tag(bus, dev); } else tag = NULL; if (tag == NULL) { sc = device_get_softc(bus); tag = sc->sc_dma_tag; } return (tag); } #else bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { struct pci_softc *sc = device_get_softc(bus); return (sc->sc_dma_tag); } #endif uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; #ifdef PCI_IOV /* * SR-IOV VFs don't implement the VID or DID registers, so we have to * emulate them here. */ if (cfg->flags & PCICFG_VF) { if (reg == PCIR_VENDOR) { switch (width) { case 4: return (cfg->device << 16 | cfg->vendor); case 2: return (cfg->vendor); case 1: return (cfg->vendor & 0xff); default: return (0xffffffff); } } else if (reg == PCIR_DEVICE) { switch (width) { /* Note that an unaligned 4-byte read is an error. 
*/ case 2: return (cfg->device); case 1: return (cfg->device & 0xff); default: return (0xffffffff); } } } #endif return (PCIB_READ_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, width)); } void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; PCIB_WRITE_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, val, width); } int pci_child_location_method(device_t dev, device_t child, struct sbuf *sb) { sbuf_printf(sb, "slot=%d function=%d dbsf=pci%d:%d:%d:%d", pci_get_slot(child), pci_get_function(child), pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (0); } int pci_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; sbuf_printf(sb, "vendor=0x%04x device=0x%04x subvendor=0x%04x " "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, cfg->progif); return (0); } int pci_get_device_path_method(device_t bus, device_t child, const char *locator, struct sbuf *sb) { device_t parent = device_get_parent(bus); int rv; if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { rv = bus_generic_get_device_path(parent, bus, locator, sb); if (rv == 0) { sbuf_printf(sb, "/Pci(0x%x,0x%x)", pci_get_slot(child), pci_get_function(child)); } return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } int pci_assign_interrupt_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, cfg->intpin)); } static void pci_lookup(void *arg, const char *name, device_t *dev) { long val; char *end; int domain, bus, slot, func; if (*dev != NULL) return; /* * Accept pciconf-style selectors of either pciD:B:S:F or * pciB:S:F. In the latter case, the domain is assumed to * be zero. 
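 * For example, "pci0:2:0:0" and "pci2:0:0" both select the device at
 * domain 0, bus 2, slot 0, function 0.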
*/ if (strncmp(name, "pci", 3) != 0) return; val = strtol(name + 3, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; domain = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; bus = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX) return; slot = val; if (*end == ':') { val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != '\0') return; func = val; } else if (*end == '\0') { func = slot; slot = bus; bus = domain; domain = 0; } else return; if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX || func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX)) return; *dev = pci_find_dbsf(domain, bus, slot, func); } static int pci_modevent(module_t mod, int what, void *arg) { static struct cdev *pci_cdev; static eventhandler_tag tag; switch (what) { case MOD_LOAD: STAILQ_INIT(&pci_devq); pci_generation = 0; pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644, "pci"); pci_load_vendor_data(); tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL, 1000); break; case MOD_UNLOAD: if (tag != NULL) EVENTHANDLER_DEREGISTER(dev_lookup, tag); destroy_dev(pci_cdev); break; } return (0); } static void pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo) { #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl); if (version > 1) { WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2); WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2); WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2); } #undef WREG } static void pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo) { pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, dinfo->cfg.pcix.pcix_command, 2); } void pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo) { /* * Restore the device to full power mode. We must do this * before we restore the registers because moving from D3 to * D0 will cause the chip's BARs and some other registers to * be reset to some unknown power on reset values. Cut down * the noise on boot by doing nothing if we are already in * state D0. 
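 * (The D3-to-D0 transition is what resets the BARs, so any config
 * writes issued before the power-up would simply be lost.)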
*/ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1); pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1); pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1); pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1); pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1); pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1); break; case PCIM_HDRTYPE_BRIDGE: pci_write_config(dev, PCIR_SECLAT_1, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_1, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_1, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_1, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_1, dinfo->cfg.bridge.br_control, 2); break; case PCIM_HDRTYPE_CARDBUS: pci_write_config(dev, PCIR_SECLAT_2, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_2, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_2, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_2, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_2, dinfo->cfg.bridge.br_control, 2); break; } pci_restore_bars(dev); if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_BRIDGE) pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2); /* * Restore extended capabilities for PCI-Express and PCI-X */ if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_restore_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_restore_pcix(dev, dinfo); /* Restore MSI and MSI-X configurations if they are present. */ if (dinfo->cfg.msi.msi_location != 0) pci_resume_msi(dev); if (dinfo->cfg.msix.msix_location != 0) pci_resume_msix(dev); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_restore(dev, dinfo); #endif } static void pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo) { #define RREG(n) pci_read_config(dev, pos + (n), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; cfg->pcie_flags = RREG(PCIER_FLAGS); version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL); if (version > 1) { cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2); cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2); cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2); } #undef RREG } static void pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo) { dinfo->cfg.pcix.pcix_command = pci_read_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2); } void pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate) { uint32_t cls; int ps; /* * Some drivers apparently write to these registers w/o updating our * cached copy. No harm happens if we update the copy, so do so here * so we can restore them. 
The COMMAND register is modified by the * bus w/o updating the cache. This should represent the normally * writable portion of the 'defined' part of type 0/1/2 headers. */ dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2); dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2); dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2); dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1); dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1); dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1); dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1); dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1); dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2); dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1); dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1); break; case PCIM_HDRTYPE_BRIDGE: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_1, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_1, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_1, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_1, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); break; case PCIM_HDRTYPE_CARDBUS: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_2, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_2, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_2, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_2, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_2, 2); dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2); break; } if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_save_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_save_pcix(dev, dinfo); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_save(dev, dinfo); #endif /* * don't set the state for display devices, base peripherals and * memory devices since bad things happen when they are powered down. * We should (a) have drivers that can easily detach and (b) use * generic drivers for these devices so that some device actually * attaches. We need to make sure that when we implement (a) we don't * power the device down on a reattach. */ cls = pci_get_class(dev); if (!setstate) return; switch (pci_do_power_nodriver) { case 0: /* NO powerdown at all */ return; case 1: /* Conservative about what to power down */ if (cls == PCIC_STORAGE) return; /*FALLTHROUGH*/ case 2: /* Aggressive about what to power down */ if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY || cls == PCIC_BASEPERIPH) return; /*FALLTHROUGH*/ case 3: /* Power down everything */ break; } /* * PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D3); } /* Wrapper APIs suitable for device driver use. 
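 * For example (a sketch, not part of this change), a driver performing
 * its own function-level reset is expected to bracket it with these:
 *
 *	pci_save_state(dev);
 *	(void)pcie_flr(dev, 1000, false);
 *	pci_restore_state(dev);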
*/ void pci_save_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); } void pci_restore_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); } static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *id) { return (PCIB_GET_ID(device_get_parent(dev), child, type, id)); } /* Find the upstream port of a given PCI device in a root complex. */ device_t pci_find_pcie_root_port(device_t dev) { struct pci_devinfo *dinfo; devclass_t pci_class; device_t pcib, bus; pci_class = devclass_find("pci"); KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class, ("%s: non-pci device %s", __func__, device_get_nameunit(dev))); /* * Walk the bridge hierarchy until we find a PCI-e root * port or a non-PCI device. */ for (;;) { bus = device_get_parent(dev); KASSERT(bus != NULL, ("%s: null parent of %s", __func__, device_get_nameunit(dev))); pcib = device_get_parent(bus); KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__, device_get_nameunit(bus))); /* * pcib's parent must be a PCI bus for this to be a * PCI-PCI bridge. */ if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (NULL); dinfo = device_get_ivars(pcib); if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) return (pcib); dev = pcib; } } /* * Wait for pending transactions to complete on a PCI-express function. * * The maximum delay is specified in milliseconds in max_delay. Note * that this function may sleep. * * Returns true if the function is idle and false if the timeout is * exceeded. If dev is not a PCI-express function, this returns true. */ bool pcie_wait_for_pending_transactions(device_t dev, u_int max_delay) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t sta; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (true); sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); while (sta & PCIEM_STA_TRANSACTION_PND) { if (max_delay == 0) return (false); /* Poll once every 100 milliseconds up to the timeout. */ if (max_delay > 100) { pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK); max_delay -= 100; } else { pause_sbt("pcietp", max_delay * SBT_1MS, 0, C_HARDCLOCK); max_delay = 0; } sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); } return (true); } /* * Determine the maximum Completion Timeout in microseconds. * * For non-PCI-express functions this returns 0. */ int pcie_get_max_completion_timeout(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); /* * Functions using the 1.x spec use the default timeout range of * 50 microseconds to 50 milliseconds. Functions that do not * support programmable timeouts also use this range. 
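 * pcie_flr() below divides this value by 1000 to size the extra
 * post-reset delay it applies when transactions were left pending.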
*/ if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 || (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) & PCIEM_CAP2_COMP_TIMO_RANGES) == 0) return (50 * 1000); switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) & PCIEM_CTL2_COMP_TIMO_VAL) { case PCIEM_CTL2_COMP_TIMO_100US: return (100); case PCIEM_CTL2_COMP_TIMO_10MS: return (10 * 1000); case PCIEM_CTL2_COMP_TIMO_55MS: return (55 * 1000); case PCIEM_CTL2_COMP_TIMO_210MS: return (210 * 1000); case PCIEM_CTL2_COMP_TIMO_900MS: return (900 * 1000); case PCIEM_CTL2_COMP_TIMO_3500MS: return (3500 * 1000); case PCIEM_CTL2_COMP_TIMO_13S: return (13 * 1000 * 1000); case PCIEM_CTL2_COMP_TIMO_64S: return (64 * 1000 * 1000); default: return (50 * 1000); } } void pcie_apei_error(device_t dev, int sev, uint8_t *aerp) { struct pci_devinfo *dinfo = device_get_ivars(dev); const char *s; int aer; uint32_t r, r1; uint16_t rs; if (sev == PCIEM_STA_CORRECTABLE_ERROR) s = "Correctable"; else if (sev == PCIEM_STA_NON_FATAL_ERROR) s = "Uncorrectable (Non-Fatal)"; else s = "Uncorrectable (Fatal)"; device_printf(dev, "%s PCIe error reported by APEI\n", s); if (aerp) { if (sev == PCIEM_STA_CORRECTABLE_ERROR) { r = le32dec(aerp + PCIR_AER_COR_STATUS); r1 = le32dec(aerp + PCIR_AER_COR_MASK); } else { r = le32dec(aerp + PCIR_AER_UC_STATUS); r1 = le32dec(aerp + PCIR_AER_UC_MASK); } device_printf(dev, "status 0x%08x mask 0x%08x", r, r1); if (sev != PCIEM_STA_CORRECTABLE_ERROR) { r = le32dec(aerp + PCIR_AER_UC_SEVERITY); rs = le16dec(aerp + PCIR_AER_CAP_CONTROL); printf(" severity 0x%08x first %d\n", r, rs & 0x1f); } else printf("\n"); } /* As a form of recovery, just report and clear the error statuses. */ if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); if (r != 0) { pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); device_printf(dev, "Clearing UC AER errors 0x%08x\n", r); } r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r != 0) { pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); device_printf(dev, "Clearing COR AER errors 0x%08x\n", r); } } if (dinfo->cfg.pcie.pcie_location != 0) { rs = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); if ((rs & (PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ)) != 0) { pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, rs, 2); device_printf(dev, "Clearing PCIe errors 0x%04x\n", rs); } } } /* * Perform a Function Level Reset (FLR) on a device. * * This function first waits for any pending transactions to complete * within the timeout specified by max_delay. If transactions are * still pending, the function will return false without attempting a * reset. * * If dev is not a PCI-express function or does not support FLR, this * function returns false. * * Note that no registers are saved or restored. The caller is * responsible for saving and restoring any registers including * PCI-standard registers via pci_save_state() and * pci_restore_state(). */ bool pcie_flr(device_t dev, u_int max_delay, bool force) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t cmd, ctl; int compl_delay; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (false); if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR)) return (false); /* * Disable busmastering to prevent generation of new * transactions while waiting for the device to go idle.
If * the idle timeout fails, the command register is restored * which will re-enable busmastering. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2); if (!pcie_wait_for_pending_transactions(dev, max_delay)) { if (!force) { pci_write_config(dev, PCIR_COMMAND, cmd, 2); return (false); } pci_printf(&dinfo->cfg, "Resetting with transactions pending after %d ms\n", max_delay); /* * Extend the post-FLR delay to cover the maximum * Completion Timeout delay of anything in flight * during the FLR delay. Enforce a minimum delay of * at least 10ms. */ compl_delay = pcie_get_max_completion_timeout(dev) / 1000; if (compl_delay < 10) compl_delay = 10; } else compl_delay = 0; /* Initiate the reset. */ ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl | PCIEM_CTL_INITIATE_FLR, 2); /* Wait for 100ms. */ pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK); if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) & PCIEM_STA_TRANSACTION_PND) pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n"); return (true); } /* * Attempt a power-management reset by cycling the device in/out of D3 * state. PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ int pci_power_reset(device_t dev) { int ps; ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_set_powerstate(dev, PCI_POWERSTATE_D3); pci_set_powerstate(dev, ps); return (0); } /* * Try link drop and retrain of the downstream port of the upstream * switch, for PCIe. According to the PCIe 3.0 spec 6.6.1, this must * cause a Conventional Hot reset of the device in the slot. * An alternative, for PCIe, is the secondary bus reset initiated * via the upstream switch's PCIR_BRIDGECTL_1, bit 6. */ int pcie_link_reset(device_t port, int pcie_location) { uint16_t v; v = pci_read_config(port, pcie_location + PCIER_LINK_CTL, 2); v |= PCIEM_LINK_CTL_LINK_DIS; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier1", mstosbt(20), 0, 0); v &= ~PCIEM_LINK_CTL_LINK_DIS; v |= PCIEM_LINK_CTL_RETRAIN_LINK; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier2", mstosbt(100), 0, 0); /* 100 ms */ v = pci_read_config(port, pcie_location + PCIER_LINK_STA, 2); return ((v & PCIEM_LINK_STA_TRAINING) != 0 ? ETIMEDOUT : 0); } static int pci_reset_post(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_restore_state(child); return (0); } static int pci_reset_prepare(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_save_state(child); return (0); } static int pci_reset_child(device_t dev, device_t child, int flags) { int error; if (dev == NULL || device_get_parent(child) != dev) return (0); if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ?
device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { if (!pcie_flr(child, 1000, false)) { error = BUS_RESET_PREPARE(dev, child); if (error == 0) pci_power_reset(child); BUS_RESET_POST(dev, child); } if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } return (error); } const struct pci_device_table * pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt) { bool match; uint16_t vendor, device, subvendor, subdevice, class, subclass, revid; vendor = pci_get_vendor(child); device = pci_get_device(child); subvendor = pci_get_subvendor(child); subdevice = pci_get_subdevice(child); class = pci_get_class(child); subclass = pci_get_subclass(child); revid = pci_get_revid(child); while (nelt-- > 0) { match = true; if (id->match_flag_vendor) match &= vendor == id->vendor; if (id->match_flag_device) match &= device == id->device; if (id->match_flag_subvendor) match &= subvendor == id->subvendor; if (id->match_flag_subdevice) match &= subdevice == id->subdevice; if (id->match_flag_class) match &= class == id->class_id; if (id->match_flag_subclass) match &= subclass == id->subclass; if (id->match_flag_revid) match &= revid == id->revid; if (match) return (id); id++; } return (NULL); } static void pci_print_faulted_dev_name(const struct pci_devinfo *dinfo) { const char *dev_name; device_t dev; dev = dinfo->cfg.dev; printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func); dev_name = device_get_name(dev); if (dev_name != NULL) printf(" (%s%d)", dev_name, device_get_unit(dev)); } void pci_print_faulted_dev(void) { struct pci_devinfo *dinfo; device_t dev; int aer, i; uint32_t r1, r2; uint16_t status; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { dev = dinfo->cfg.dev; status = pci_read_config(dev, PCIR_STATUS, 2); status &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT | PCIM_STATUS_SERR | PCIM_STATUS_PERR; if (status != 0) { pci_print_faulted_dev_name(dinfo); printf(" error 0x%04x\n", status); } if (dinfo->cfg.pcie.pcie_location != 0) { status = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); if ((status & (PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ)) != 0) { pci_print_faulted_dev_name(dinfo); printf(" PCIe DEVCTL 0x%04x DEVSTA 0x%04x\n", pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2), status); } } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r1 = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); r2 = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r1 != 0 || r2 != 0) { pci_print_faulted_dev_name(dinfo); printf(" AER UC 0x%08x Mask 0x%08x Svr 0x%08x\n" " COR 0x%08x Mask 0x%08x Ctl 0x%08x\n", r1, pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4), pci_read_config(dev, aer + PCIR_AER_UC_SEVERITY, 4), r2, pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4), pci_read_config(dev, aer + PCIR_AER_CAP_CONTROL, 4)); for (i = 0; i < 4; i++) { r1 = pci_read_config(dev, aer + PCIR_AER_HEADER_LOG + i * 4, 4); printf(" HL%d: 0x%08x\n", i, r1); } } } } } #ifdef DDB DB_SHOW_COMMAND_FLAGS(pcierr, pci_print_faulted_dev_db, DB_CMD_MEMSAFE) { pci_print_faulted_dev(); } static void db_clear_pcie_errors(const struct pci_devinfo *dinfo) { device_t dev; int aer; uint32_t r; dev = dinfo->cfg.dev; r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); pci_write_config(dev, 
dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, r, 2); if (pci_find_extcap(dev, PCIZ_AER, &aer) != 0) return; r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); if (r != 0) pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r != 0) pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); } DB_COMMAND_FLAGS(pci_clearerr, db_pci_clearerr, DB_CMD_MEMSAFE) { struct pci_devinfo *dinfo; device_t dev; uint16_t status, status1; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { dev = dinfo->cfg.dev; status1 = status = pci_read_config(dev, PCIR_STATUS, 2); status1 &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT | PCIM_STATUS_SERR | PCIM_STATUS_PERR; if (status1 != 0) { status &= ~status1; pci_write_config(dev, PCIR_STATUS, status, 2); } if (dinfo->cfg.pcie.pcie_location != 0) db_clear_pcie_errors(dinfo); } } #endif diff --git a/sys/dev/pci/pci_host_generic.c b/sys/dev/pci/pci_host_generic.c index 3657be018c99..5831b0f9a8d9 100644 --- a/sys/dev/pci/pci_host_generic.c +++ b/sys/dev/pci/pci_host_generic.c @@ -1,747 +1,742 @@ /*- * Copyright (c) 2015, 2020 Ruslan Bukin * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* Generic ECAM PCIe driver */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #if defined(VM_MEMATTR_DEVICE_NP) #define PCI_UNMAPPED #define PCI_RF_FLAGS RF_UNMAPPED #else #define PCI_RF_FLAGS 0 #endif /* Forward prototypes */ static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes); static int generic_pcie_maxslots(device_t dev); static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value); int pci_host_generic_core_attach(device_t dev) { #ifdef PCI_UNMAPPED struct resource_map_request req; struct resource_map map; #endif struct generic_pcie_core_softc *sc; uint64_t phys_base; uint64_t pci_base; uint64_t size; const char *range_descr; char buf[64]; int domain, error; int flags, rid, tuple, type; sc = device_get_softc(dev); sc->dev = dev; /* Create the parent DMA tag to pass down the coherent flag */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (error != 0) return (error); /* * Attempt to set the domain. If it's missing, or we are unable to * set it then memory allocations may be placed in the wrong domain. */ if (bus_get_domain(dev, &domain) == 0) (void)bus_dma_tag_set_domain(sc->dmat, domain); if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) { rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, PCI_RF_FLAGS | RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate memory.\n"); error = ENXIO; goto err_resource; } #ifdef PCI_UNMAPPED resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE_NP; error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req, &map); if (error != 0) { device_printf(dev, "could not map memory.\n"); return (error); } rman_set_mapping(sc->res, &map); #endif } sc->has_pmem = false; sc->pmem_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s prefetch window", device_get_nameunit(dev)); sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF); sc->mem_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev)); sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF); sc->io_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s I/O port window", device_get_nameunit(dev)); sc->io_rman.rm_descr = strdup(buf, M_DEVBUF); /* Initialize rman and allocate memory regions */ error = rman_init(&sc->pmem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto err_pmem_rman; } error = rman_init(&sc->mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto err_mem_rman; } error = rman_init(&sc->io_rman); if (error) { device_printf(dev, "rman_init() failed. 
error = %d\n", error); goto err_io_rman; } for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { phys_base = sc->ranges[tuple].phys_base; pci_base = sc->ranges[tuple].pci_base; size = sc->ranges[tuple].size; rid = tuple + 1; if (size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: sc->has_pmem = true; range_descr = "prefetch"; flags = RF_PREFETCHABLE; type = SYS_RES_MEMORY; error = rman_manage_region(&sc->pmem_rman, pci_base, pci_base + size - 1); break; case FLAG_TYPE_MEM: range_descr = "memory"; flags = 0; type = SYS_RES_MEMORY; error = rman_manage_region(&sc->mem_rman, pci_base, pci_base + size - 1); break; case FLAG_TYPE_IO: range_descr = "I/O port"; flags = 0; type = SYS_RES_IOPORT; error = rman_manage_region(&sc->io_rman, pci_base, pci_base + size - 1); break; default: continue; } if (error) { device_printf(dev, "rman_manage_region() failed. error = %d\n", error); goto err_rman_manage; } error = bus_set_resource(dev, type, rid, phys_base, size); if (error != 0) { device_printf(dev, "failed to set resource for range %d: %d\n", tuple, error); goto err_rman_manage; } sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE | RF_UNMAPPED | flags); if (sc->ranges[tuple].res == NULL) { device_printf(dev, "failed to allocate resource for range %d\n", tuple); error = ENXIO; goto err_rman_manage; } if (bootverbose) device_printf(dev, "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n", pci_base, phys_base, size, range_descr); } return (0); err_rman_manage: for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { if (sc->ranges[tuple].size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: case FLAG_TYPE_MEM: type = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: type = SYS_RES_IOPORT; break; default: continue; } if (sc->ranges[tuple].res != NULL) bus_release_resource(dev, type, tuple + 1, sc->ranges[tuple].res); bus_delete_resource(dev, type, tuple + 1); } rman_fini(&sc->io_rman); err_io_rman: rman_fini(&sc->mem_rman); err_mem_rman: rman_fini(&sc->pmem_rman); err_pmem_rman: free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); if (sc->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); err_resource: bus_dma_tag_destroy(sc->dmat); return (error); } int pci_host_generic_core_detach(device_t dev) { struct generic_pcie_core_softc *sc; int error, tuple, type; sc = device_get_softc(dev); error = bus_generic_detach(dev); if (error != 0) return (error); for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { if (sc->ranges[tuple].size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: case FLAG_TYPE_MEM: type = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: type = SYS_RES_IOPORT; break; default: continue; } if (sc->ranges[tuple].res != NULL) bus_release_resource(dev, type, tuple + 1, sc->ranges[tuple].res); bus_delete_resource(dev, type, tuple + 1); } rman_fini(&sc->io_rman); rman_fini(&sc->mem_rman); rman_fini(&sc->pmem_rman); free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); if (sc->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); bus_dma_tag_destroy(sc->dmat); return (0); } static uint32_t generic_pcie_read_config(device_t dev, u_int bus,
u_int slot, u_int func, u_int reg, int bytes) { struct generic_pcie_core_softc *sc; uint64_t offset; uint32_t data; sc = device_get_softc(dev); if ((bus < sc->bus_start) || (bus > sc->bus_end)) return (~0U); if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return (~0U); if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0) return (~0U); offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); switch (bytes) { case 1: data = bus_read_1(sc->res, offset); break; case 2: data = le16toh(bus_read_2(sc->res, offset)); break; case 4: data = le32toh(bus_read_4(sc->res, offset)); break; default: return (~0U); } return (data); } static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct generic_pcie_core_softc *sc; uint64_t offset; sc = device_get_softc(dev); if ((bus < sc->bus_start) || (bus > sc->bus_end)) return; if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return; offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); switch (bytes) { case 1: bus_write_1(sc->res, offset, val); break; case 2: bus_write_2(sc->res, offset, htole16(val)); break; case 4: bus_write_4(sc->res, offset, htole32(val)); break; default: return; } } static int generic_pcie_maxslots(device_t dev) { return (31); /* max slots per bus acc. to standard */ } static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); if (index == PCIB_IVAR_BUS) { *result = sc->bus_start; return (0); } if (index == PCIB_IVAR_DOMAIN) { *result = sc->ecam; return (0); } if (bootverbose) device_printf(dev, "ERROR: Unknown index %d.\n", index); return (ENOENT); } static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } static struct rman * generic_pcie_get_rman(device_t dev, int type, u_int flags) { struct generic_pcie_core_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: return (&sc->io_rman); case SYS_RES_MEMORY: if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0) return (&sc->pmem_rman); return (&sc->mem_rman); default: break; } return (NULL); } int pci_host_generic_core_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_release_bus(sc->ecam, child, rid, res)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(dev, child, type, rid, res)); default: return (bus_generic_release_resource(dev, child, type, rid, res)); } } static struct pcie_range * generic_pcie_containing_range(device_t dev, int type, rman_res_t start, rman_res_t end) { struct generic_pcie_core_softc *sc = device_get_softc(dev); uint64_t pci_base; uint64_t size; int i, space; switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (NULL); } for (i = 0; i < MAX_RANGES_TUPLES; i++) { pci_base = sc->ranges[i].pci_base; size = sc->ranges[i].size; if (size == 0) continue; /* empty range element */ if (start < pci_base || end >= pci_base + size) continue; switch (FLAG_TYPE(sc->ranges[i].flags)) { case FLAG_TYPE_MEM: case FLAG_TYPE_PMEM: space = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: space = SYS_RES_IOPORT; break; default: continue; } if (type 
== space) return (&sc->ranges[i]); } return (NULL); } static int generic_pcie_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *new_start) { struct pcie_range *range; /* Translate the address from a PCI address to a physical address */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: range = generic_pcie_containing_range(dev, type, start, start); if (range == NULL) return (ENOENT); *new_start = start - range->pci_base + range->phys_base; break; default: /* No translation for non-memory types */ *new_start = start; break; } return (0); } struct resource * pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; #endif struct resource *res; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end, count, flags); break; #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); break; default: res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); break; } if (res == NULL) { device_printf(dev, "%s FAIL: type=%d, rid=%d, " "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); } return (res); } static int -generic_pcie_activate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +generic_pcie_activate_resource(device_t dev, device_t child, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(r)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_activate_bus(sc->ecam, child, rid, r)); + return (pci_domain_activate_bus(sc->ecam, child, r)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_activate_resource(dev, child, type, - rid, r)); + return (bus_generic_rman_activate_resource(dev, child, r)); default: - return (bus_generic_activate_resource(dev, child, type, rid, - r)); + return (bus_generic_activate_resource(dev, child, r)); } } static int -generic_pcie_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +generic_pcie_deactivate_resource(device_t dev, device_t child, + struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(r)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_deactivate_bus(sc->ecam, child, rid, r)); + return (pci_domain_deactivate_bus(sc->ecam, child, r)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_deactivate_resource(dev, child, type, - rid, r)); + return (bus_generic_rman_deactivate_resource(dev, child, r)); default: - return (bus_generic_deactivate_resource(dev, child, type, rid, - r)); + return (bus_generic_deactivate_resource(dev, child, r)); } } static int generic_pcie_adjust_resource(device_t dev, device_t child, struct resource *res, rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif switch 
(rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->ecam, child, res, start, end)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(dev, child, res, start, end)); default: return (bus_generic_adjust_resource(dev, child, res, start, end)); } } static int generic_pcie_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct pcie_range *range; rman_res_t length, start; int error, type; type = rman_get_type(r); switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (EINVAL); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (bus_generic_map_resource(dev, child, r, argsp, map)); } /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); range = generic_pcie_containing_range(dev, type, rman_get_start(r), rman_get_end(r)); if (range == NULL || range->res == NULL) return (ENOENT); args.offset = start - range->pci_base; args.length = length; return (bus_generic_map_resource(dev, child, range->res, &args, map)); } static int generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { struct pcie_range *range; int type; type = rman_get_type(r); switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (EINVAL); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: range = generic_pcie_containing_range(dev, type, rman_get_start(r), rman_get_end(r)); if (range == NULL || range->res == NULL) return (ENOENT); r = range->res; break; default: break; } return (bus_generic_unmap_resource(dev, child, r, map)); } static bus_dma_tag_t generic_pcie_get_dma_tag(device_t dev, device_t child) { struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); return (sc->dmat); } static device_method_t generic_pcie_methods[] = { DEVMETHOD(device_attach, pci_host_generic_core_attach), DEVMETHOD(device_detach, pci_host_generic_core_detach), DEVMETHOD(bus_get_rman, generic_pcie_get_rman), DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), DEVMETHOD(bus_alloc_resource, pci_host_generic_core_alloc_resource), DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource), DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource), DEVMETHOD(bus_map_resource, generic_pcie_map_resource), DEVMETHOD(bus_unmap_resource, generic_pcie_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), DEVMETHOD(pcib_read_config, generic_pcie_read_config), DEVMETHOD(pcib_write_config, generic_pcie_write_config), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, generic_pcie_core_driver, generic_pcie_methods, sizeof(struct generic_pcie_core_softc)); diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c index 146b67c70801..cef58a1670d7 100644 --- a/sys/dev/pci/pci_pci.c +++ 
b/sys/dev/pci/pci_pci.c @@ -1,3161 +1,3159 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * PCI:PCI bridge support. */ #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" static int pcib_probe(device_t dev); static int pcib_suspend(device_t dev); static int pcib_resume(device_t dev); static bus_child_present_t pcib_child_present; static bus_alloc_resource_t pcib_alloc_resource; #ifdef NEW_PCIB static bus_adjust_resource_t pcib_adjust_resource; static bus_release_resource_t pcib_release_resource; static bus_activate_resource_t pcib_activate_resource; static bus_deactivate_resource_t pcib_deactivate_resource; static bus_map_resource_t pcib_map_resource; static bus_unmap_resource_t pcib_unmap_resource; #endif static int pcib_reset_child(device_t dev, device_t child, int flags); static int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate); static int pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id); static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width); static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width); static int pcib_ari_maxslots(device_t dev); static int pcib_ari_maxfuncs(device_t dev); static int pcib_try_enable_ari(device_t pcib, device_t dev); static int pcib_ari_enabled(device_t pcib); static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func); #ifdef PCI_HP static void pcib_pcie_ab_timeout(void *arg, int pending); static void pcib_pcie_cc_timeout(void *arg, int pending); static void pcib_pcie_dll_timeout(void *arg, int pending); #endif static int pcib_request_feature_default(device_t pcib, device_t dev, enum pci_feature feature); static device_method_t pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcib_probe), 
DEVMETHOD(device_attach, pcib_attach), DEVMETHOD(device_detach, pcib_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, pcib_suspend), DEVMETHOD(device_resume, pcib_resume), /* Bus interface */ DEVMETHOD(bus_child_present, pcib_child_present), DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, pcib_alloc_resource), #ifdef NEW_PCIB DEVMETHOD(bus_adjust_resource, pcib_adjust_resource), DEVMETHOD(bus_release_resource, pcib_release_resource), DEVMETHOD(bus_activate_resource, pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, pcib_deactivate_resource), DEVMETHOD(bus_map_resource, pcib_map_resource), DEVMETHOD(bus_unmap_resource, pcib_unmap_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), #endif DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_reset_child, pcib_reset_child), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_ari_maxslots), DEVMETHOD(pcib_maxfuncs, pcib_ari_maxfuncs), DEVMETHOD(pcib_read_config, pcib_read_config), DEVMETHOD(pcib_write_config, pcib_write_config), DEVMETHOD(pcib_route_interrupt, pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, pcib_power_for_sleep), DEVMETHOD(pcib_get_id, pcib_ari_get_id), DEVMETHOD(pcib_try_enable_ari, pcib_try_enable_ari), DEVMETHOD(pcib_ari_enabled, pcib_ari_enabled), DEVMETHOD(pcib_decode_rid, pcib_ari_decode_rid), DEVMETHOD(pcib_request_feature, pcib_request_feature_default), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc)); EARLY_DRIVER_MODULE(pcib, pci, pcib_driver, NULL, NULL, BUS_PASS_BUS); #if defined(NEW_PCIB) || defined(PCI_HP) SYSCTL_DECL(_hw_pci); #endif #ifdef NEW_PCIB static int pci_clear_pcib; SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0, "Clear firmware-assigned resources for PCI-PCI bridge I/O windows."); /* * Get the corresponding window if this resource from a child device was * sub-allocated from one of our window resource managers. */ static struct pcib_window * pcib_get_resource_window(struct pcib_softc *sc, struct resource *r) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: if (rman_is_region_manager(r, &sc->io.rman)) return (&sc->io); break; case SYS_RES_MEMORY: /* Prefetchable resources may live in either memory rman. */ if (rman_get_flags(r) & RF_PREFETCHABLE && rman_is_region_manager(r, &sc->pmem.rman)) return (&sc->pmem); if (rman_is_region_manager(r, &sc->mem.rman)) return (&sc->mem); break; } return (NULL); } /* * Is a resource from a child device sub-allocated from one of our * resource managers? 
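 * For example, a child's BAR suballocated from this bridge's memory
 * window is managed here, while a resource the child obtained from
 * another bridge is not and must be passed up the tree.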
*/ static int pcib_is_resource_managed(struct pcib_softc *sc, struct resource *r) { #ifdef PCI_RES_BUS if (rman_get_type(r) == PCI_RES_BUS) return (rman_is_region_manager(r, &sc->bus.rman)); #endif return (pcib_get_resource_window(sc, r) != NULL); } static int pcib_is_window_open(struct pcib_window *pw) { return (pw->valid && pw->base < pw->limit); } /* * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and * handle for the resource, we could pass RF_ACTIVE up to the PCI bus * when allocating the resource windows and rely on the PCI bus driver * to do this for us. */ static void pcib_activate_window(struct pcib_softc *sc, int type) { PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type); } static void pcib_write_windows(struct pcib_softc *sc, int mask) { device_t dev; uint32_t val; dev = sc->dev; if (sc->io.valid && mask & WIN_IO) { val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { pci_write_config(dev, PCIR_IOBASEH_1, sc->io.base >> 16, 2); pci_write_config(dev, PCIR_IOLIMITH_1, sc->io.limit >> 16, 2); } pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1); pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1); } if (mask & WIN_MEM) { pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2); } if (sc->pmem.valid && mask & WIN_PMEM) { val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { pci_write_config(dev, PCIR_PMBASEH_1, sc->pmem.base >> 32, 4); pci_write_config(dev, PCIR_PMLIMITH_1, sc->pmem.limit >> 32, 4); } pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2); } } /* * This is used to reject I/O port allocations that conflict with an * ISA alias range. */ static int pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end, rman_res_t count) { rman_res_t next_alias; if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE)) return (0); /* Only check fixed ranges for overlap. */ if (start + count - 1 != end) return (0); /* ISA aliases are only in the lower 64KB of I/O space. */ if (start >= 65536) return (0); /* Check for overlap with 0x000 - 0x0ff as a special case. */ if (start < 0x100) goto alias; /* * If the start address is an alias, the range is an alias. * Otherwise, compute the start of the next alias range and * check if it is before the end of the candidate range. 
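 * For example, 0x400-0x4ff stops short of the next alias range at
 * 0x500 and is accepted, while 0x400-0x5ff reaches it and is
 * rejected.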
*/ if ((start & 0x300) != 0) goto alias; next_alias = (start & ~0x3fful) | 0x100; if (next_alias <= end) goto alias; return (0); alias: if (bootverbose) device_printf(sc->dev, "I/O range %#jx-%#jx overlaps with an ISA alias\n", start, end); return (1); } static void pcib_add_window_resources(struct pcib_window *w, struct resource **res, int count) { struct resource **newarray; int error, i; newarray = malloc(sizeof(struct resource *) * (w->count + count), M_DEVBUF, M_WAITOK); if (w->res != NULL) bcopy(w->res, newarray, sizeof(struct resource *) * w->count); bcopy(res, newarray + w->count, sizeof(struct resource *) * count); free(w->res, M_DEVBUF); w->res = newarray; w->count += count; for (i = 0; i < count; i++) { error = rman_manage_region(&w->rman, rman_get_start(res[i]), rman_get_end(res[i])); if (error) panic("Failed to add resource to rman"); } } typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg); static void pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb, void *arg) { rman_res_t next_end; /* * If start is within an ISA alias range, move up to the start * of the next non-alias range. As a special case, addresses * in the range 0x000 - 0x0ff should also be skipped since * those are used for various system I/O devices in ISA * systems. */ if (start <= 65535) { if (start < 0x100 || (start & 0x300) != 0) { start &= ~0x3ff; start += 0x400; } } /* ISA aliases are only in the lower 64KB of I/O space. */ while (start <= MIN(end, 65535)) { next_end = MIN(start | 0xff, end); cb(start, next_end, arg); start += 0x400; } if (start <= end) cb(start, end, arg); } static void count_ranges(rman_res_t start, rman_res_t end, void *arg) { int *countp; countp = arg; (*countp)++; } struct alloc_state { struct resource **res; struct pcib_softc *sc; int count, error; }; static void alloc_ranges(rman_res_t start, rman_res_t end, void *arg) { struct alloc_state *as; struct pcib_window *w; int rid; as = arg; if (as->error != 0) return; w = &as->sc->io; rid = w->reg; if (bootverbose) device_printf(as->sc->dev, "allocating non-ISA range %#jx-%#jx\n", start, end); as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT, &rid, start, end, end - start + 1, RF_ACTIVE | RF_UNMAPPED); if (as->res[as->count] == NULL) as->error = ENXIO; else as->count++; } static int pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end) { struct alloc_state as; int i, new_count; /* First, see how many ranges we need. */ new_count = 0; pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count); /* Second, allocate the ranges. */ as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF, M_WAITOK); as.sc = sc; as.count = 0; as.error = 0; pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as); if (as.error != 0) { for (i = 0; i < as.count; i++) bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io.reg, as.res[i]); free(as.res, M_DEVBUF); return (as.error); } KASSERT(as.count == new_count, ("%s: count mismatch", __func__)); /* Third, add the ranges to the window. 
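 * (pcib_add_window_resources() copies the resource pointers into the
 * window's own array, so the temporary array is freed right after.)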
*/ pcib_add_window_resources(&sc->io, as.res, as.count); free(as.res, M_DEVBUF); return (0); } static void pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type, int flags, pci_addr_t max_address) { struct resource *res; char buf[64]; int error, rid; if (max_address != (rman_res_t)max_address) max_address = ~0; w->rman.rm_start = 0; w->rman.rm_end = max_address; w->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s %s window", device_get_nameunit(sc->dev), w->name); w->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&w->rman); if (error) panic("Failed to initialize %s %s rman", device_get_nameunit(sc->dev), w->name); if (!pcib_is_window_open(w)) return; if (w->base > max_address || w->limit > max_address) { device_printf(sc->dev, "initial %s window has too many bits, ignoring\n", w->name); return; } if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE) (void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit); else { rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit, w->limit - w->base + 1, flags | RF_ACTIVE | RF_UNMAPPED); if (res != NULL) pcib_add_window_resources(w, &res, 1); } if (w->res == NULL) { device_printf(sc->dev, "failed to allocate initial %s window: %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); w->base = max_address; w->limit = 0; pcib_write_windows(sc, w->mask); return; } pcib_activate_window(sc, type); } /* * Initialize I/O windows. */ static void pcib_probe_windows(struct pcib_softc *sc) { pci_addr_t max; device_t dev; uint32_t val; dev = sc->dev; if (pci_clear_pcib) { pcib_bridge_init(dev); } /* Determine if the I/O port window is implemented. */ val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if (val == 0) { /* * If 'val' is zero, then only 16-bits of I/O space * are supported. */ pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) { sc->io.valid = 1; pci_write_config(dev, PCIR_IOBASEL_1, 0, 1); } } else sc->io.valid = 1; /* Read the existing I/O port window. */ if (sc->io.valid) { sc->io.reg = PCIR_IOBASEL_1; sc->io.step = 12; sc->io.mask = WIN_IO; sc->io.name = "I/O port"; if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { sc->io.base = PCI_PPBIOBASE( pci_read_config(dev, PCIR_IOBASEH_1, 2), val); sc->io.limit = PCI_PPBIOLIMIT( pci_read_config(dev, PCIR_IOLIMITH_1, 2), pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffffffff; } else { sc->io.base = PCI_PPBIOBASE(0, val); sc->io.limit = PCI_PPBIOLIMIT(0, pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffff; } pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max); } /* Read the existing memory window. */ sc->mem.valid = 1; sc->mem.reg = PCIR_MEMBASE_1; sc->mem.step = 20; sc->mem.mask = WIN_MEM; sc->mem.name = "memory"; sc->mem.base = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->mem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff); /* Determine if the prefetchable memory window is implemented. */ val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if (val == 0) { /* * If 'val' is zero, then only 32-bits of memory space * are supported. */ pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) { sc->pmem.valid = 1; pci_write_config(dev, PCIR_PMBASEL_1, 0, 2); } } else sc->pmem.valid = 1; /* Read the existing prefetchable memory window. 
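 * (Illustrative decode: if the low bits of PCIR_PMBASEL_1 read back
 * as the PCIM_BRPM_64 encoding, the window is 64-bit and
 * PCIR_PMBASEH_1/PCIR_PMLIMITH_1 supply bits 63:32 of the decoded
 * base and limit below.)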
*/ if (sc->pmem.valid) { sc->pmem.reg = PCIR_PMBASEL_1; sc->pmem.step = 20; sc->pmem.mask = WIN_PMEM; sc->pmem.name = "prefetch"; if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { sc->pmem.base = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), val); sc->pmem.limit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffffffffffff; } else { sc->pmem.base = PCI_PPBMEMBASE(0, val); sc->pmem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffff; } pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY, RF_PREFETCHABLE, max); } } static void pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type) { device_t dev; int error, i; if (!w->valid) return; dev = sc->dev; error = rman_fini(&w->rman); if (error) { device_printf(dev, "failed to release %s rman\n", w->name); return; } free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF); for (i = 0; i < w->count; i++) { error = bus_free_resource(dev, type, w->res[i]); if (error) device_printf(dev, "failed to release %s resource: %d\n", w->name, error); } free(w->res, M_DEVBUF); } static void pcib_free_windows(struct pcib_softc *sc) { pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY); pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY); pcib_release_window(sc, &sc->io, SYS_RES_IOPORT); } #ifdef PCI_RES_BUS /* * Allocate a suitable secondary bus for this bridge if needed and * initialize the resource manager for the secondary bus range. Note * that the minimum count is a desired value and this may allocate a * smaller range. */ void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count) { char buf[64]; int error, rid, sec_reg; switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; bus->sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; bus->sub_reg = PCIR_SUBBUS_2; break; default: panic("not a PCI bridge"); } bus->sec = pci_read_config(dev, sec_reg, 1); bus->sub = pci_read_config(dev, bus->sub_reg, 1); bus->dev = dev; bus->rman.rm_start = 0; bus->rman.rm_end = PCI_BUSMAX; bus->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev)); bus->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&bus->rman); if (error) panic("Failed to initialize %s bus number rman", device_get_nameunit(dev)); /* * Allocate a bus range. This will return an existing bus range * if one exists, or a new bus range if one does not. */ rid = 0; bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid, min_count, RF_ACTIVE); if (bus->res == NULL) { /* * Fall back to just allocating a range of a single bus * number. */ bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid, 1, RF_ACTIVE); } else if (rman_get_size(bus->res) < min_count) /* * Attempt to grow the existing range to satisfy the * minimum desired count. */ (void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res, rman_get_start(bus->res), rman_get_start(bus->res) + min_count - 1); /* * Add the initial resource to the rman. 
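 * With a min_count of 1, a fresh bridge typically ends up with a
 * single bus number here; sec and sub are then rewritten below from
 * the range actually obtained.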
*/ if (bus->res != NULL) { error = rman_manage_region(&bus->rman, rman_get_start(bus->res), rman_get_end(bus->res)); if (error) panic("Failed to add resource to rman"); bus->sec = rman_get_start(bus->res); bus->sub = rman_get_end(bus->res); } } void pcib_free_secbus(device_t dev, struct pcib_secbus *bus) { int error; error = rman_fini(&bus->rman); if (error) { device_printf(dev, "failed to release bus number rman\n"); return; } free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF); error = bus_free_resource(dev, PCI_RES_BUS, bus->res); if (error) device_printf(dev, "failed to release bus numbers resource: %d\n", error); } static struct resource * pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; res = rman_reserve_resource(&bus->rman, start, end, count, flags, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(bus->dev, "allocated bus range (%ju-%ju) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); rman_set_type(res, PCI_RES_BUS); return (res); } /* * Attempt to grow the secondary bus range. This is much simpler than * for I/O windows as the range can only be grown by increasing * subbus. */ static int pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end) { rman_res_t old_end; int error; old_end = rman_get_end(bus->res); KASSERT(new_end > old_end, ("attempt to shrink subbus")); error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res, rman_get_start(bus->res), new_end); if (error) return (error); if (bootverbose) device_printf(bus->dev, "grew bus range to %ju-%ju\n", rman_get_start(bus->res), rman_get_end(bus->res)); error = rman_manage_region(&bus->rman, old_end + 1, rman_get_end(bus->res)); if (error) panic("Failed to add resource to rman"); bus->sub = rman_get_end(bus->res); pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1); return (0); } struct resource * pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; rman_res_t start_free, end_free, new_end; /* * First, see if the request can be satisfied by the existing * bus range. */ res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags); if (res != NULL) return (res); /* * Figure out a range to grow the bus range. First, find the * first bus number after the last allocated bus in the rman and * enforce that as a minimum starting point for the range. */ if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 || end_free != bus->sub) start_free = bus->sub + 1; if (start_free < start) start_free = start; new_end = start_free + count - 1; /* * See if this new range would satisfy the request if it * succeeds. */ if (new_end > end) return (NULL); /* Finally, attempt to grow the existing resource. */ if (bootverbose) { device_printf(bus->dev, "attempting to grow bus range for %ju buses\n", count); printf("\tback candidate range: %ju-%ju\n", start_free, new_end); } if (pcib_grow_subbus(bus, new_end) == 0) return (pcib_suballoc_bus(bus, child, rid, start, end, count, flags)); return (NULL); } #endif #else /* * Is the prefetch window open (eg, can we allocate memory in it?) */ static int pcib_is_prefetch_open(struct pcib_softc *sc) { return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit); } /* * Is the nonprefetch window open (eg, can we allocate memory in it?)
*/ static int pcib_is_nonprefetch_open(struct pcib_softc *sc) { return (sc->membase > 0 && sc->membase < sc->memlimit); } /* * Is the io window open (eg, can we allocate ports in it?) */ static int pcib_is_io_open(struct pcib_softc *sc) { return (sc->iobase > 0 && sc->iobase < sc->iolimit); } /* * Get current I/O decode. */ static void pcib_get_io_decode(struct pcib_softc *sc) { device_t dev; uint32_t iolow; dev = sc->dev; iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1); if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32) sc->iobase = PCI_PPBIOBASE( pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow); else sc->iobase = PCI_PPBIOBASE(0, iolow); iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1); if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32) sc->iolimit = PCI_PPBIOLIMIT( pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow); else sc->iolimit = PCI_PPBIOLIMIT(0, iolow); } /* * Get current memory decode. */ static void pcib_get_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemlow; dev = sc->dev; sc->membase = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->memlimit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmembase = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow); else sc->pmembase = PCI_PPBMEMBASE(0, pmemlow); pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmemlimit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow); else sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow); } /* * Restore previous I/O decode. */ static void pcib_set_io_decode(struct pcib_softc *sc) { device_t dev; uint32_t iohi; dev = sc->dev; iohi = sc->iobase >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2); pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1); iohi = sc->iolimit >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2); pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1); } /* * Restore previous memory decode. */ static void pcib_set_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemhi; dev = sc->dev; pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2); pmemhi = sc->pmembase >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2); pmemhi = sc->pmemlimit >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2); } #endif #ifdef PCI_HP /* * PCI-express HotPlug support. */ static int pci_enable_pcie_hp = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN, &pci_enable_pcie_hp, 0, "Enable support for native PCI-express HotPlug."); TASKQUEUE_DEFINE_THREAD(pci_hp); static void pcib_probe_hotplug(struct pcib_softc *sc) { device_t dev; uint32_t link_cap; uint16_t link_sta, slot_sta; if (!pci_enable_pcie_hp) return; dev = sc->dev; if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0) return; if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT)) return; sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4); if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0) return; link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4); if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0) return; /* * Some devices report that they have an MRL when they actually * do not. 
Since they always report that the MRL is open, child * devices would be ignored. Try to detect these devices and * ignore their claim of HotPlug support. * * If there is an open MRL but the Data Link Layer is active, * the MRL is not real. */ if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) { link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 && (link_sta & PCIEM_LINK_STA_DL_ACTIVE) != 0) { return; } } /* * Now that we're sure we want to do hot plug, ask the * firmware, if any, if that's OK. */ if (pcib_request_feature(dev, PCI_FEATURE_HP) != 0) { if (bootverbose) device_printf(dev, "Unable to activate hot plug feature.\n"); return; } sc->flags |= PCIB_HOTPLUG; } /* * Send a HotPlug command to the slot control register. If this slot * uses command completion interrupts and a previous command is still * in progress, then the command is dropped. Once the previous * command completes or times out, pcib_pcie_hotplug_update() will be * invoked to post a new command based on the slot's state at that * time. */ static void pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask) { device_t dev; uint16_t ctl, new; dev = sc->dev; if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) return; ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2); new = (ctl & ~mask) | val; if (new == ctl) return; if (bootverbose) device_printf(dev, "HotPlug command: %04x -> %04x\n", ctl, new); pcie_write_config(dev, PCIER_SLOT_CTL, new, 2); if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) && (ctl & new) & PCIEM_SLOT_CTL_CCIE) { sc->flags |= PCIB_HOTPLUG_CMD_PENDING; if (!cold) taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, hz); } } static void pcib_pcie_hotplug_command_completed(struct pcib_softc *sc) { device_t dev; dev = sc->dev; if (bootverbose) device_printf(dev, "Command Completed\n"); if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING)) return; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; wakeup(sc); } /* * Returns true if a card is fully inserted from the user's * perspective. It may not yet be ready for access, but the driver * can now start enabling access if necessary. */ static bool pcib_hotplug_inserted(struct pcib_softc *sc) { /* Pretend the card isn't present if a detach is forced. */ if (sc->flags & PCIB_DETACHING) return (false); /* Card must be present in the slot. */ if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0) return (false); /* A power fault implicitly turns off power to the slot. */ if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) return (false); /* If the MRL is disengaged, the slot is powered off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP && (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0) return (false); return (true); } /* * Returns -1 if the card is fully inserted, powered, and ready for * access. Otherwise, returns 0. */ static int pcib_hotplug_present(struct pcib_softc *sc) { /* Card must be inserted. */ if (!pcib_hotplug_inserted(sc)) return (0); /* Require the Data Link Layer to be active. 
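 * An inserted card whose link has not yet trained still reports 0
 * here; pcib_pcie_dll_timeout() below handles a link that never
 * becomes active.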
*/ if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)) return (0); return (-1); } static int pci_enable_pcie_ei = 0; SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_ei, CTLFLAG_RWTUN, &pci_enable_pcie_ei, 0, "Enable support for PCI-express Electromechanical Interlock."); static void pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask, bool schedule_task) { bool card_inserted, ei_engaged; /* Clear DETACHING if Presence Detect has cleared. */ if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) == PCIEM_SLOT_STA_PDC) sc->flags &= ~PCIB_DETACHING; card_inserted = pcib_hotplug_inserted(sc); /* Turn the power indicator on if a card is inserted. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) { mask |= PCIEM_SLOT_CTL_PIC; if (card_inserted) val |= PCIEM_SLOT_CTL_PI_ON; else if (sc->flags & PCIB_DETACH_PENDING) val |= PCIEM_SLOT_CTL_PI_BLINK; else val |= PCIEM_SLOT_CTL_PI_OFF; } /* Turn the power on via the Power Controller if a card is inserted. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) { mask |= PCIEM_SLOT_CTL_PCC; if (card_inserted) val |= PCIEM_SLOT_CTL_PC_ON; else val |= PCIEM_SLOT_CTL_PC_OFF; } /* * If a card is inserted, enable the Electromechanical * Interlock. If a card is not inserted (or we are in the * process of detaching), disable the Electromechanical * Interlock. */ if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) && pci_enable_pcie_ei) { mask |= PCIEM_SLOT_CTL_EIC; ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0; if (card_inserted != ei_engaged) val |= PCIEM_SLOT_CTL_EIC; } /* * Start a timer to see if the Data Link Layer times out. * Note that we only start the timer if Presence Detect or MRL Sensor * changed on this interrupt. Stop any scheduled timer if * the Data Link Layer is active. */ if (card_inserted && !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) && sc->pcie_slot_sta & (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) { if (cold) device_printf(sc->dev, "Data Link Layer inactive\n"); else taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, hz); } else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, NULL); pcib_pcie_hotplug_command(sc, val, mask); /* * During attach the child "pci" device is added synchronously; * otherwise, the task is scheduled to manage the child * device. */ if (schedule_task && (pcib_hotplug_present(sc) != 0) != (sc->child != NULL)) taskqueue_enqueue(taskqueue_pci_hp, &sc->pcie_hp_task); } static void pcib_pcie_intr_hotplug(void *arg) { struct pcib_softc *sc; device_t dev; uint16_t old_slot_sta; sc = arg; dev = sc->dev; PCIB_HP_LOCK(sc); old_slot_sta = sc->pcie_slot_sta; sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); /* Clear the events just reported. */ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); if (bootverbose) device_printf(dev, "HotPlug interrupt: %#x\n", sc->pcie_slot_sta); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) { if (sc->flags & PCIB_DETACH_PENDING) { device_printf(dev, "Attention Button Pressed: Detach Cancelled\n"); sc->flags &= ~PCIB_DETACH_PENDING; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, NULL); } else if (old_slot_sta & PCIEM_SLOT_STA_PDS) { /* Only initiate detach sequence if device present. 
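 * A second press within the 5 second window takes the cancel branch
 * above instead of queueing another detach.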
*/ device_printf(dev, "Attention Button Pressed: Detaching in 5 seconds\n"); sc->flags |= PCIB_DETACH_PENDING; taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, 5 * hz); } } if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) device_printf(dev, "Power Fault Detected\n"); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC) device_printf(dev, "MRL Sensor Changed to %s\n", sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" : "closed"); if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC) device_printf(dev, "Presence Detect Changed to %s\n", sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? "card present" : "empty"); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC) pcib_pcie_hotplug_command_completed(sc); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) { sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); if (bootverbose) device_printf(dev, "Data Link Layer State Changed to %s\n", sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ? "active" : "inactive"); } pcib_pcie_hotplug_update(sc, 0, 0, true); PCIB_HP_UNLOCK(sc); } static void pcib_pcie_hotplug_task(void *context, int pending) { struct pcib_softc *sc; device_t dev; sc = context; PCIB_HP_LOCK(sc); dev = sc->dev; if (pcib_hotplug_present(sc) != 0) { if (sc->child == NULL) { sc->child = device_add_child(dev, "pci", -1); bus_generic_attach(dev); } } else { if (sc->child != NULL) { if (device_delete_child(dev, sc->child) == 0) sc->child = NULL; } } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_ab_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; PCIB_HP_LOCK(sc); if (sc->flags & PCIB_DETACH_PENDING) { sc->flags |= PCIB_DETACHING; sc->flags &= ~PCIB_DETACH_PENDING; pcib_pcie_hotplug_update(sc, 0, 0, true); } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_cc_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; device_t dev = sc->dev; uint16_t sta; PCIB_HP_LOCK(sc); sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); if (!(sta & PCIEM_SLOT_STA_CC)) { device_printf(dev, "HotPlug Command Timed Out\n"); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; } else { device_printf(dev, "Missed HotPlug interrupt waiting for Command Completion\n"); pcib_pcie_intr_hotplug(sc); } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_dll_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; device_t dev = sc->dev; uint16_t sta; PCIB_HP_LOCK(sc); sta = pcie_read_config(dev, PCIER_LINK_STA, 2); if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) { device_printf(dev, "Timed out waiting for Data Link Layer Active\n"); sc->flags |= PCIB_DETACHING; pcib_pcie_hotplug_update(sc, 0, 0, true); } else if (sta != sc->pcie_link_sta) { device_printf(dev, "Missed HotPlug interrupt waiting for DLL Active\n"); pcib_pcie_intr_hotplug(sc); } PCIB_HP_UNLOCK(sc); } static int pcib_alloc_pcie_irq(struct pcib_softc *sc) { device_t dev; int count, error, mem_rid, rid; rid = -1; dev = sc->dev; /* * For simplicity, only use MSI-X if there is a single message. * To support a device with multiple messages we would have to * use remap intr if the MSI number is not 0. 
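 *
 * The resulting fallback order below: one MSI-X message if the
 * device exposes exactly one, else a single MSI message, else the
 * legacy INTx line at rid 0.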
*/ count = pci_msix_count(dev); if (count == 1) { mem_rid = pci_msix_table_bar(dev); sc->pcie_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, RF_ACTIVE); if (sc->pcie_mem == NULL) { device_printf(dev, "Failed to allocate BAR for MSI-X table\n"); } else { error = pci_alloc_msix(dev, &count); if (error == 0) rid = 1; } } if (rid < 0 && pci_msi_count(dev) > 0) { count = 1; error = pci_alloc_msi(dev, &count); if (error == 0) rid = 1; } if (rid < 0) rid = 0; sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->pcie_irq == NULL) { device_printf(dev, "Failed to allocate interrupt for PCI-e events\n"); if (rid > 0) pci_release_msi(dev); return (ENXIO); } error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC|INTR_MPSAFE, NULL, pcib_pcie_intr_hotplug, sc, &sc->pcie_ihand); if (error) { device_printf(dev, "Failed to setup PCI-e interrupt handler\n"); bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq); if (rid > 0) pci_release_msi(dev); return (error); } return (0); } static int pcib_release_pcie_irq(struct pcib_softc *sc) { device_t dev; int error; dev = sc->dev; error = bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand); if (error) return (error); error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq); if (error) return (error); error = pci_release_msi(dev); if (error) return (error); if (sc->pcie_mem != NULL) error = bus_free_resource(dev, SYS_RES_MEMORY, sc->pcie_mem); return (error); } static void pcib_setup_hotplug(struct pcib_softc *sc) { device_t dev; uint16_t mask, val; dev = sc->dev; TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_ab_task, 0, pcib_pcie_ab_timeout, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_cc_task, 0, pcib_pcie_cc_timeout, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_dll_task, 0, pcib_pcie_dll_timeout, sc); sc->pcie_hp_lock = bus_topo_mtx(); /* Allocate IRQ. */ if (pcib_alloc_pcie_irq(sc) != 0) return; sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); /* Clear any events previously pending. */ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); /* Enable HotPlug events. */ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB) val |= PCIEM_SLOT_CTL_ABPE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) val |= PCIEM_SLOT_CTL_PFDE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) val |= PCIEM_SLOT_CTL_MRLSCE; if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS)) val |= PCIEM_SLOT_CTL_CCIE; /* Turn the attention indicator off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { mask |= PCIEM_SLOT_CTL_AIC; val |= PCIEM_SLOT_CTL_AI_OFF; } pcib_pcie_hotplug_update(sc, val, mask, false); } static int pcib_detach_hotplug(struct pcib_softc *sc) { uint16_t mask, val; int error; /* Disable the card in the slot and force it to detach. */ if (sc->flags & PCIB_DETACH_PENDING) { sc->flags &= ~PCIB_DETACH_PENDING; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, NULL); } sc->flags |= PCIB_DETACHING; if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) { taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL); tsleep(sc, 0, "hpcmd", hz); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; } /* Disable HotPlug events. 
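 * This clears the same event mask that pcib_setup_hotplug() enabled
 * above; val is left at 0 apart from turning the attention indicator
 * off below.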
*/ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; val = 0; /* Turn the attention indicator off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { mask |= PCIEM_SLOT_CTL_AIC; val |= PCIEM_SLOT_CTL_AI_OFF; } pcib_pcie_hotplug_update(sc, val, mask, false); error = pcib_release_pcie_irq(sc); if (error) return (error); taskqueue_drain(taskqueue_pci_hp, &sc->pcie_hp_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_ab_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_cc_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_dll_task); return (0); } #endif /* * Get current bridge configuration. */ static void pcib_cfg_save(struct pcib_softc *sc) { #ifndef NEW_PCIB device_t dev; uint16_t command; dev = sc->dev; command = pci_read_config(dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_get_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_get_mem_decode(sc); #endif } /* * Restore previous bridge configuration. */ static void pcib_cfg_restore(struct pcib_softc *sc) { #ifndef NEW_PCIB uint16_t command; #endif #ifdef NEW_PCIB pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM); #else command = pci_read_config(sc->dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_set_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_set_mem_decode(sc); #endif } /* * Generic device interface */ static int pcib_probe(device_t dev) { if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) { device_set_desc(dev, "PCI-PCI bridge"); return(-10000); } return(ENXIO); } void pcib_attach_common(device_t dev) { struct pcib_softc *sc; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int comma; sc = device_get_softc(dev); sc->dev = dev; /* * Get current bridge configuration. */ sc->domain = pci_get_domain(dev); #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); #endif sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); pcib_cfg_save(sc); /* * The primary bus register should always be the bus of the * parent. */ sc->pribus = pci_get_bus(dev); pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1); /* * Setup sysctl reporting nodes */ sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); /* * Quirk handling. */ switch (pci_get_devid(dev)) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) case 0x12258086: /* Intel 82454KX/GX (Orion) */ { uint8_t supbus; supbus = pci_read_config(dev, 0x41, 1); if (supbus != 0xff) { sc->bus.sec = supbus + 1; sc->bus.sub = supbus + 1; } break; } #endif /* * The i82380FB mobile docking controller is a PCI-PCI bridge, * and it is a subtractive bridge. However, the ProgIf is wrong * so the normal setting of PCIB_SUBTRACTIVE bit doesn't * happen. There are also Toshiba and Cavium ThunderX bridges * that behave this way. 
*/ case 0xa002177d: /* Cavium ThunderX */ case 0x124b8086: /* Intel 82380FB Mobile */ case 0x060513d7: /* Toshiba ???? */ sc->flags |= PCIB_SUBTRACTIVE; break; #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* Compaq R3000 BIOS sets wrong subordinate bus number. */ case 0x00dd10de: { char *cp; if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sc->bus.sub < 0xa) { pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); } break; } #endif } if (pci_msi_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSI; if (pci_msix_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSIX; /* * Intel 815, 845 and other chipsets say they are PCI-PCI bridges, * but have a ProgIF of 0x80. The 82801 family (AA, AB, BAM/CAM, * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese. * This means they act as if they were subtractively decoding * bridges and pass all transactions. Mark them and real ProgIf 1 * parts as subtractive. */ if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 || pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE) sc->flags |= PCIB_SUBTRACTIVE; #ifdef PCI_HP pcib_probe_hotplug(sc); #endif #ifdef NEW_PCIB #ifdef PCI_RES_BUS pcib_setup_secbus(dev, &sc->bus, 1); #endif pcib_probe_windows(sc); #endif #ifdef PCI_HP if (sc->flags & PCIB_HOTPLUG) pcib_setup_hotplug(sc); #endif if (bootverbose) { device_printf(dev, " domain %d\n", sc->domain); device_printf(dev, " secondary bus %d\n", sc->bus.sec); device_printf(dev, " subordinate bus %d\n", sc->bus.sub); #ifdef NEW_PCIB if (pcib_is_window_open(&sc->io)) device_printf(dev, " I/O decode 0x%jx-0x%jx\n", (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit); if (pcib_is_window_open(&sc->mem)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit); if (pcib_is_window_open(&sc->pmem)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit); #else if (pcib_is_io_open(sc)) device_printf(dev, " I/O decode 0x%x-0x%x\n", sc->iobase, sc->iolimit); if (pcib_is_nonprefetch_open(sc)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->membase, (uintmax_t)sc->memlimit); if (pcib_is_prefetch_open(sc)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); #endif if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) || sc->flags & PCIB_SUBTRACTIVE) { device_printf(dev, " special decode "); comma = 0; if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) { printf("ISA"); comma = 1; } if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) { printf("%sVGA", comma ? ", " : ""); comma = 1; } if (sc->flags & PCIB_SUBTRACTIVE) printf("%ssubtractive", comma ? ", " : ""); printf("\n"); } } /* * Always enable busmastering on bridges so that transactions * initiated on the secondary bus are passed through to the * primary bus. 
*/ pci_enable_busmaster(dev); } #ifdef PCI_HP static int pcib_present(struct pcib_softc *sc) { if (sc->flags & PCIB_HOTPLUG) return (pcib_hotplug_present(sc) != 0); return (1); } #endif int pcib_attach_child(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->bus.sec == 0) { /* no secondary bus; we should have fixed this */ return(0); } #ifdef PCI_HP if (!pcib_present(sc)) { /* An empty HotPlug slot, so don't add a PCI bus yet. */ return (0); } #endif sc->child = device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } int pcib_attach(device_t dev) { pcib_attach_common(dev); return (pcib_attach_child(dev)); } int pcib_detach(device_t dev) { #if defined(PCI_HP) || defined(NEW_PCIB) struct pcib_softc *sc; #endif int error; #if defined(PCI_HP) || defined(NEW_PCIB) sc = device_get_softc(dev); #endif error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_HP if (sc->flags & PCIB_HOTPLUG) { error = pcib_detach_hotplug(sc); if (error) return (error); } #endif error = device_delete_children(dev); if (error) return (error); #ifdef NEW_PCIB pcib_free_windows(sc); #ifdef PCI_RES_BUS pcib_free_secbus(dev, &sc->bus); #endif #endif return (0); } int pcib_suspend(device_t dev) { pcib_cfg_save(device_get_softc(dev)); return (bus_generic_suspend(dev)); } int pcib_resume(device_t dev) { pcib_cfg_restore(device_get_softc(dev)); /* * Restore the Command register only after restoring the windows. * The bridge should not be claiming random windows. */ pci_write_config(dev, PCIR_COMMAND, pci_get_cmdreg(dev), 2); return (bus_generic_resume(dev)); } void pcib_bridge_init(device_t dev) { pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2); pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1); pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2); pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2); pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4); pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2); pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4); } int pcib_child_present(device_t dev, device_t child) { #ifdef PCI_HP struct pcib_softc *sc = device_get_softc(dev); int retval; retval = bus_child_present(dev); if (retval != 0 && sc->flags & PCIB_HOTPLUG) retval = pcib_hotplug_present(sc); return (retval); #else return (bus_child_present(dev)); #endif } int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->domain; return(0); case PCIB_IVAR_BUS: *result = sc->bus.sec; return(0); } return(ENOENT); } int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return(EINVAL); case PCIB_IVAR_BUS: return(EINVAL); } return(ENOENT); } #ifdef NEW_PCIB /* * Attempt to allocate a resource from the existing resources assigned * to a window. 
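 *
 * E.g. (hypothetical request): a child asking for 4K of
 * SYS_RES_MEMORY inside an open 0xc0000000-0xc00fffff window is
 * carved out of the window's rman here without touching the bridge
 * registers.
 */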
*/ static struct resource * pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (!pcib_is_window_open(w)) return (NULL); res = rman_reserve_resource(&w->rman, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(sc->dev, "allocated %s range (%#jx-%#jx) for rid %x of %s\n", w->name, rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); rman_set_type(res, type); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } } return (res); } /* Allocate a fresh resource range for an unconfigured window. */ static int pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; rman_res_t base, limit, wmask; int rid; /* * If this is an I/O window on a bridge with ISA enable set * and the start address is below 64k, then try to allocate an * initial window of 0x1000 bytes long starting at address * 0xf000 and walking down. Note that if the original request * was larger than the non-aliased range size of 0x100 our * caller would have raised the start address up to 64k * already. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && start < 65536) { for (base = 0xf000; (long)base >= 0; base -= 0x1000) { limit = base + 0xfff; /* * Skip ranges that wouldn't work for the * original request. Note that the actual * window that overlaps are the non-alias * ranges within [base, limit], so this isn't * quite a simple comparison. */ if (start + count > limit - 0x400) continue; if (base == 0) { /* * The first open region for the window at * 0 is 0x400-0x4ff. */ if (end - count + 1 < 0x400) continue; } else { if (end - count + 1 < base) continue; } if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) { w->base = base; w->limit = limit; return (0); } } return (ENOSPC); } wmask = ((rman_res_t)1 << w->step) - 1; if (RF_ALIGNMENT(flags) < w->step) { flags &= ~RF_ALIGNMENT_MASK; flags |= RF_ALIGNMENT_LOG2(w->step); } start &= ~wmask; end |= wmask; count = roundup2(count, (rman_res_t)1 << w->step); rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, start, end, count, flags | RF_ACTIVE | RF_UNMAPPED); if (res == NULL) return (ENOSPC); pcib_add_window_resources(w, &res, 1); pcib_activate_window(sc, type); w->base = rman_get_start(res); w->limit = rman_get_end(res); return (0); } /* Try to expand an existing window to the requested base and limit. */ static int pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t base, rman_res_t limit) { struct resource *res; int error, i, force_64k_base; KASSERT(base <= w->base && limit >= w->limit, ("attempting to shrink window")); /* * XXX: pcib_grow_window() doesn't try to do this anyway and * the error handling for all the edge cases would be tedious. */ KASSERT(limit == w->limit || base == w->base, ("attempting to grow both ends of a window")); /* * Yet more special handling for requests to expand an I/O * window behind an ISA-enabled bridge. Since I/O windows * have to grow in 0x1000 increments and the end of the 0xffff * range is an alias, growing a window below 64k will always * result in allocating new resources and never adjusting an * existing resource. 
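 *
 * Worked example (illustrative): growing an existing 0x4000-0x4fff
 * window down to 0x3000 cannot simply adjust the resource; only the
 * non-alias slices 0x3000-0x30ff, 0x3400-0x34ff, 0x3800-0x38ff and
 * 0x3c00-0x3cff are usable, so pcib_alloc_nonisa_ranges() is invoked
 * for 0x3000-0x3fff instead.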
*/ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && (limit <= 65535 || (base <= 65535 && base != w->base))) { KASSERT(limit == w->limit || limit <= 65535, ("attempting to grow both ends across 64k ISA alias")); if (base != w->base) error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1); else error = pcib_alloc_nonisa_ranges(sc, w->limit + 1, limit); if (error == 0) { w->base = base; w->limit = limit; } return (error); } /* * Find the existing resource to adjust. Usually there is only one, * but for an ISA-enabled bridge we might be growing the I/O window * above 64k and need to find the existing resource that maps all * of the area above 64k. */ for (i = 0; i < w->count; i++) { if (rman_get_end(w->res[i]) == w->limit) break; } KASSERT(i != w->count, ("did not find existing resource")); res = w->res[i]; /* * Usually the resource we found should match the window's * existing range. The one exception is the ISA-enabled case * mentioned above in which case the resource should start at * 64k. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && w->base <= 65535) { KASSERT(rman_get_start(res) == 65536, ("existing resource mismatch")); force_64k_base = 1; } else { KASSERT(w->base == rman_get_start(res), ("existing resource mismatch")); force_64k_base = 0; } error = bus_adjust_resource(sc->dev, type, res, force_64k_base ? rman_get_start(res) : base, limit); if (error) return (error); /* Add the newly allocated region to the resource manager. */ if (w->base != base) { error = rman_manage_region(&w->rman, base, w->base - 1); w->base = base; } else { error = rman_manage_region(&w->rman, w->limit + 1, limit); w->limit = limit; } if (error) { if (bootverbose) device_printf(sc->dev, "failed to expand %s resource manager\n", w->name); (void)bus_adjust_resource(sc->dev, type, res, force_64k_base ? rman_get_start(res) : w->base, w->limit); } return (error); } /* * Attempt to grow a window to make room for a given resource request. */ static int pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t align, start_free, end_free, front, back, wmask; int error; /* * Clamp the desired resource range to the maximum address * this window supports. Reject impossible requests. * * For I/O port requests behind a bridge with the ISA enable * bit set, force large allocations to start above 64k. */ if (!w->valid) return (EINVAL); if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 && start < 65536) start = 65536; if (end > w->rman.rm_end) end = w->rman.rm_end; if (start + count - 1 > end || start + count < start) return (EINVAL); wmask = ((rman_res_t)1 << w->step) - 1; /* * If there is no resource at all, just try to allocate enough * aligned space for this resource. */ if (w->res == NULL) { error = pcib_alloc_new_window(sc, w, type, start, end, count, flags); if (error) { if (bootverbose) device_printf(sc->dev, "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n", w->name, start, end, count); return (error); } if (bootverbose) device_printf(sc->dev, "allocated initial %s window of %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); goto updatewin; } /* * See if growing the window would help. Compute the minimum * amount of address space needed on both the front and back * ends of the existing window to satisfy the allocation. * * For each end, build a candidate region adjusting for the * required alignment, etc. 
If there is a free region at the * edge of the window, grow from the inner edge of the free * region. Otherwise grow from the window boundary. * * Growing an I/O window below 64k for a bridge with the ISA * enable bit doesn't require any special magic as the step * size of an I/O window (1k) always includes multiple * non-alias ranges when it is grown in either direction. * * XXX: Special case: if w->res is completely empty and the * request size is larger than w->res, we should find the * optimal aligned buffer containing w->res and allocate that. */ if (bootverbose) device_printf(sc->dev, "attempting to grow %s window for (%#jx-%#jx,%#jx)\n", w->name, start, end, count); align = (rman_res_t)1 << RF_ALIGNMENT(flags); if (start < w->base) { if (rman_first_free_region(&w->rman, &start_free, &end_free) != 0 || start_free != w->base) end_free = w->base; if (end_free > end) end_free = end + 1; /* Move end_free down until it is properly aligned. */ end_free &= ~(align - 1); end_free--; front = end_free - (count - 1); /* * The resource would now be allocated at (front, * end_free). Ensure that fits in the (start, end) * bounds. end_free is checked above. If 'front' is * ok, ensure it is properly aligned for this window. * Also check for underflow. */ if (front >= start && front <= end_free) { if (bootverbose) printf("\tfront candidate range: %#jx-%#jx\n", front, end_free); front &= ~wmask; front = w->base - front; } else front = 0; } else front = 0; if (end > w->limit) { if (rman_last_free_region(&w->rman, &start_free, &end_free) != 0 || end_free != w->limit) start_free = w->limit + 1; if (start_free < start) start_free = start; /* Move start_free up until it is properly aligned. */ start_free = roundup2(start_free, align); back = start_free + count - 1; /* * The resource would now be allocated at (start_free, * back). Ensure that fits in the (start, end) * bounds. start_free is checked above. If 'back' is * ok, ensure it is properly aligned for this window. * Also check for overflow. */ if (back <= end && start_free <= back) { if (bootverbose) printf("\tback candidate range: %#jx-%#jx\n", start_free, back); back |= wmask; back -= w->limit; } else back = 0; } else back = 0; /* * Try to allocate the smallest needed region first. * If that fails, fall back to the other region. */ error = ENOSPC; while (front != 0 || back != 0) { if (front != 0 && (front <= back || back == 0)) { error = pcib_expand_window(sc, w, type, w->base - front, w->limit); if (error == 0) break; front = 0; } else { error = pcib_expand_window(sc, w, type, w->base, w->limit + back); if (error == 0) break; back = 0; } } if (error) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); updatewin: /* Write the new window. */ KASSERT((w->base & wmask) == 0, ("start address is not aligned")); KASSERT((w->limit & wmask) == wmask, ("end address is not aligned")); pcib_write_windows(sc, w->mask); return (0); } /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. */ static struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pcib_softc *sc; struct resource *r; sc = device_get_softc(dev); /* * VGA resources are decoded iff the VGA enable bit is set in * the bridge control register. VGA resources do not fall into * the resource windows and are passed up to the parent. 
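 * (The ranges in question are the legacy VGA apertures: I/O ports
 * 0x3b0-0x3bb and 0x3c0-0x3df and memory 0xa0000-0xbffff, as tested
 * by pci_is_vga_ioport_range() and pci_is_vga_memory_range() below.)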
*/ if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) || (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) { if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); else return (NULL); } switch (type) { #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, count, flags)); #endif case SYS_RES_IOPORT: if (pcib_is_isa_range(sc, start, end, count)) return (NULL); r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (pcib_grow_window(sc, &sc->io, type, start, end, count, flags) == 0) r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); break; case SYS_RES_MEMORY: /* * For prefetchable resources, prefer the prefetchable * memory window, but fall back to the regular memory * window if that fails. Try both windows before * attempting to grow a window in case the firmware * has used a range in the regular memory window to * map a prefetchable BAR. */ if (flags & RF_PREFETCHABLE) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (flags & RF_PREFETCHABLE) { if (pcib_grow_window(sc, &sc->pmem, type, start, end, count, flags) == 0) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } } if (pcib_grow_window(sc, &sc->mem, type, start, end, count, flags & ~RF_PREFETCHABLE) == 0) r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); break; default: return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } /* * If attempts to suballocate from the window fail but this is a * subtractive bridge, pass the request up the tree. */ if (sc->flags & PCIB_SUBTRACTIVE && r == NULL) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); return (r); } static int pcib_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct pcib_softc *sc; struct pcib_window *w; rman_res_t wmask; int error, type; sc = device_get_softc(bus); type = rman_get_type(r); /* * If the resource wasn't sub-allocated from one of our region * managers then just pass the request up. */ if (!pcib_is_resource_managed(sc, r)) return (bus_generic_adjust_resource(bus, child, r, start, end)); #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) { /* * If our bus range isn't big enough to grow the sub-allocation * then we need to grow our bus range. Any request that would * require us to decrease the start of our own bus range is * invalid, we can only extend the end; ignore such requests * and let rman_adjust_resource fail below. */ if (start >= sc->bus.sec && end > sc->bus.sub) { error = pcib_grow_subbus(&sc->bus, end); if (error != 0) return (error); } } else #endif { /* * Resource is managed and not a secondary bus number, must * be from one of our windows. */ w = pcib_get_resource_window(sc, r); KASSERT(w != NULL, ("%s: no window for resource (%#jx-%#jx) type %d", __func__, rman_get_start(r), rman_get_end(r), type)); /* * If our window isn't big enough to grow the sub-allocation * then we need to expand the window. 
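 * The new edges are first rounded out to the window granularity;
 * e.g. (illustrative) with w->step == 20 a memory window moves in
 * 1MB units, so growing down to 0xc0080000 rounds the base to
 * 0xc0000000 via 'start & ~wmask'.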
*/ if (start < w->base || end > w->limit) { wmask = ((rman_res_t)1 << w->step) - 1; error = pcib_expand_window(sc, w, type, MIN(start & ~wmask, w->base), MAX(end | wmask, w->limit)); if (error != 0) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); pcib_write_windows(sc, w->mask); } } return (rman_adjust_resource(r, start, end)); } static int pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pcib_softc *sc; int error; sc = device_get_softc(dev); if (pcib_is_resource_managed(sc, r)) { if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } return (bus_generic_release_resource(dev, child, type, rid, r)); } static int -pcib_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +pcib_activate_resource(device_t dev, device_t child, struct resource *r) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map map; - int error; + int error, type; if (!pcib_is_resource_managed(sc, r)) - return (bus_generic_activate_resource(dev, child, type, rid, - r)); + return (bus_generic_activate_resource(dev, child, r)); error = rman_activate_resource(r); if (error != 0) return (error); + type = rman_get_type(r); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { error = BUS_MAP_RESOURCE(dev, child, r, NULL, &map); if (error != 0) { rman_deactivate_resource(r); return (error); } rman_set_mapping(r, &map); } return (0); } static int -pcib_deactivate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +pcib_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map map; - int error; + int error, type; if (!pcib_is_resource_managed(sc, r)) - return (bus_generic_deactivate_resource(dev, child, type, rid, - r)); + return (bus_generic_deactivate_resource(dev, child, r)); error = rman_deactivate_resource(r); if (error != 0) return (error); + type = rman_get_type(r); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { rman_get_mapping(r, &map); BUS_UNMAP_RESOURCE(dev, child, r, &map); } return (0); } static struct resource * pcib_find_parent_resource(struct pcib_window *w, struct resource *r) { for (int i = 0; i < w->count; i++) { if (rman_get_start(w->res[i]) <= rman_get_start(r) && rman_get_end(w->res[i]) >= rman_get_end(r)) return (w->res[i]); } return (NULL); } static int pcib_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map_request args; struct pcib_window *w; struct resource *pres; rman_res_t length, start; int error; w = pcib_get_resource_window(sc, r); if (w == NULL) return (bus_generic_map_resource(dev, child, r, argsp, map)); /* Resources must be active to be mapped. 
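 *
 * The child mapping is expressed as an offset into the parent window
 * resource below; e.g. (illustrative), mapping a suballocation at
 * 0xc0001000 out of a parent resource starting at 0xc0000000 yields
 * args.offset == 0x1000 before the request is forwarded.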
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); pres = pcib_find_parent_resource(w, r); if (pres == NULL) return (ENOENT); args.offset = start - rman_get_start(pres); args.length = length; return (bus_generic_map_resource(dev, child, pres, &args, map)); } static int pcib_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { struct pcib_softc *sc = device_get_softc(dev); struct pcib_window *w; w = pcib_get_resource_window(sc, r); if (w != NULL) { r = pcib_find_parent_resource(w, r); if (r == NULL) return (ENOENT); } return (bus_generic_unmap_resource(dev, child, r, map)); } #else /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. */ static struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pcib_softc *sc = device_get_softc(dev); const char *name, *suffix; int ok; /* * Fail the allocation for this range if it's not supported. */ name = device_get_nameunit(child); if (name == NULL) { name = ""; suffix = ""; } else suffix = " "; switch (type) { case SYS_RES_IOPORT: ok = 0; if (!pcib_is_io_open(sc)) break; ok = (start >= sc->iobase && end <= sc->iolimit); /* * Make sure we allow access to VGA I/O addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_ioport_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { if (start < sc->iobase) start = sc->iobase; if (end > sc->iolimit) end = sc->iolimit; if (start < end) ok = 1; } } else { ok = 1; #if 0 /* * If we overlap with the subtractive range, then * pick the upper range to use. */ if (start < sc->iolimit && end > sc->iobase) start = sc->iolimit + 1; #endif } if (end < start) { device_printf(dev, "ioport: end (%jx) < start (%jx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok) { device_printf(dev, "%s%srequested unsupported I/O " "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n", name, suffix, start, end, sc->iobase, sc->iolimit); return (NULL); } if (bootverbose) device_printf(dev, "%s%srequested I/O range 0x%jx-0x%jx: in range\n", name, suffix, start, end); break; case SYS_RES_MEMORY: ok = 0; if (pcib_is_nonprefetch_open(sc)) ok = ok || (start >= sc->membase && end <= sc->memlimit); if (pcib_is_prefetch_open(sc)) ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit); /* * Make sure we allow access to VGA memory addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_memory_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 
1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { ok = 1; if (flags & RF_PREFETCHABLE) { if (pcib_is_prefetch_open(sc)) { if (start < sc->pmembase) start = sc->pmembase; if (end > sc->pmemlimit) end = sc->pmemlimit; } else { ok = 0; } } else { /* non-prefetchable */ if (pcib_is_nonprefetch_open(sc)) { if (start < sc->membase) start = sc->membase; if (end > sc->memlimit) end = sc->memlimit; } else { ok = 0; } } } } else if (!ok) { ok = 1; /* subtractive bridge: always ok */ #if 0 if (pcib_is_nonprefetch_open(sc)) { if (start < sc->memlimit && end > sc->membase) start = sc->memlimit + 1; } if (pcib_is_prefetch_open(sc)) { if (start < sc->pmemlimit && end > sc->pmembase) start = sc->pmemlimit + 1; } #endif } if (end < start) { device_printf(dev, "memory: end (%jx) < start (%jx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok && bootverbose) device_printf(dev, "%s%srequested unsupported memory range %#jx-%#jx " "(decoding %#jx-%#jx, %#jx-%#jx)\n", name, suffix, start, end, (uintmax_t)sc->membase, (uintmax_t)sc->memlimit, (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); if (!ok) return (NULL); if (bootverbose) device_printf(dev,"%s%srequested memory range " "0x%jx-0x%jx: good\n", name, suffix, start, end); break; default: break; } /* * Bridge is OK decoding this resource, so pass it up. */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #endif /* * If ARI is enabled on this downstream port, translate the function number * to the non-ARI slot/function. The downstream port will convert it back in * hardware. If ARI is not enabled slot and func are not modified. */ static __inline void pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func) { struct pcib_softc *sc; int ari_func; sc = device_get_softc(pcib); ari_func = *func; if (sc->flags & PCIB_ENABLE_ARI) { KASSERT(*slot == 0, ("Non-zero slot number with ARI enabled!")); *slot = PCIE_ARI_SLOT(ari_func); *func = PCIE_ARI_FUNC(ari_func); } } static void pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos) { uint32_t ctl2; ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4); ctl2 |= PCIEM_CTL2_ARI; pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4); sc->flags |= PCIB_ENABLE_ARI; } /* * PCIB interface. */ int pcib_maxslots(device_t dev) { #if !defined(__amd64__) && !defined(__i386__) uint32_t pcie_pos; uint16_t val; /* * If this is a PCIe rootport or downstream switch port, there's only * one slot permitted. */ if (pci_find_cap(dev, PCIY_EXPRESS, &pcie_pos) == 0) { val = pci_read_config(dev, pcie_pos + PCIER_FLAGS, 2); val &= PCIEM_FLAGS_TYPE; if (val == PCIEM_TYPE_ROOT_PORT || val == PCIEM_TYPE_DOWNSTREAM_PORT) return (0); } #endif return (PCI_SLOTMAX); } static int pcib_ari_maxslots(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_SLOTMAX); else return (pcib_maxslots(dev)); } static int pcib_ari_maxfuncs(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_FUNCMAX); else return (PCI_FUNCMAX); } static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func) { struct pcib_softc *sc; sc = device_get_softc(pcib); *bus = PCI_RID2BUS(rid); if (sc->flags & PCIB_ENABLE_ARI) { *slot = PCIE_ARI_RID2SLOT(rid); *func = PCIE_ARI_RID2FUNC(rid); } else { *slot = PCI_RID2SLOT(rid); *func = PCI_RID2FUNC(rid); } } /* * Since we are a child of a PCI bus, its parent must support the pcib interface. 
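 *
 * Config accesses are forwarded two levels up after ARI translation;
 * e.g. (illustrative), with ARI enabled a request for slot 0,
 * function 33 is rewritten by pcib_xlate_ari() to slot 33 >> 3 == 4,
 * function 33 & 7 == 1. Likewise, the interrupt swizzle in
 * pcib_route_interrupt() below maps a device in slot 3 raising INTB
 * (pin 2) to parent_intpin (3 + 1) % 4 == 0, i.e. INTA at the parent.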
*/ static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { #ifdef PCI_HP struct pcib_softc *sc; sc = device_get_softc(dev); if (!pcib_present(sc)) { switch (width) { case 2: return (0xffff); case 1: return (0xff); default: return (0xffffffff); } } #endif pcib_xlate_ari(dev, b, &s, &f); return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, width)); } static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { #ifdef PCI_HP struct pcib_softc *sc; sc = device_get_softc(dev); if (!pcib_present(sc)) return; #endif pcib_xlate_ari(dev, b, &s, &f); PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, val, width); } /* * Route an interrupt across a PCI bridge. */ int pcib_route_interrupt(device_t pcib, device_t dev, int pin) { device_t bus; int parent_intpin; int intnum; /* * * The PCI standard defines a swizzle of the child-side device/intpin to * the parent-side intpin as follows. * * device = device on child bus * child_intpin = intpin on child bus slot (0-3) * parent_intpin = intpin on parent bus slot (0-3) * * parent_intpin = (device + child_intpin) % 4 */ parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4; /* * Our parent is a PCI bus. Its parent must export the pcib interface * which includes the ability to route interrupts. */ bus = device_get_parent(pcib); intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1); if (PCI_INTERRUPT_VALID(intnum) && bootverbose) { device_printf(pcib, "slot %d INT%c is routed to irq %d\n", pci_get_slot(dev), 'A' + pin - 1, intnum); } return(intnum); } /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */ int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSI) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } /* Pass request to release MSI/MSI-X messages up to the parent bridge. */ int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); } /* Pass request to alloc an MSI-X message up to the parent bridge. */ int pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSIX) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to release an MSI-X message up to the parent bridge. */ int pcib_release_msix(device_t pcib, device_t dev, int irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to map MSI/MSI-X message up to parent bridge. */ int pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); pci_ht_map_msi(pcib, *addr); return (0); } /* Pass request for device power state up to parent bridge. 
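 * (A worked example for the swizzle in pcib_route_interrupt() above: a
 * device in slot 3 asserting INTB has pin == 2, so parent_intpin =
 * (3 + (2 - 1)) % 4 = 0, which the bridge reports upstream as INTA.)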
*/ int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate) { device_t bus; bus = device_get_parent(pcib); return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate)); } static int pcib_ari_enabled(device_t pcib) { struct pcib_softc *sc; sc = device_get_softc(pcib); return ((sc->flags & PCIB_ENABLE_ARI) != 0); } static int pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id) { struct pcib_softc *sc; device_t bus_dev; uint8_t bus, slot, func; if (type != PCI_ID_RID) { bus_dev = device_get_parent(pcib); return (PCIB_GET_ID(device_get_parent(bus_dev), dev, type, id)); } sc = device_get_softc(pcib); if (sc->flags & PCIB_ENABLE_ARI) { bus = pci_get_bus(dev); func = pci_get_function(dev); *id = (PCI_ARI_RID(bus, func)); } else { bus = pci_get_bus(dev); slot = pci_get_slot(dev); func = pci_get_function(dev); *id = (PCI_RID(bus, slot, func)); } return (0); } /* * Check that the downstream port (pcib) and the endpoint device (dev) both * support ARI. If so, enable it and return 0, otherwise return an error. */ static int pcib_try_enable_ari(device_t pcib, device_t dev) { struct pcib_softc *sc; int error; uint32_t cap2; int ari_cap_off; uint32_t ari_ver; uint32_t pcie_pos; sc = device_get_softc(pcib); /* * ARI is controlled in a register in the PCIe capability structure. * If the downstream port does not have the PCIe capability structure * then it does not support ARI. */ error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos); if (error != 0) return (ENODEV); /* Check that the PCIe port advertises ARI support. */ cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4); if (!(cap2 & PCIEM_CAP2_ARI)) return (ENODEV); /* * Check that the endpoint device advertises ARI support via the ARI * extended capability structure. */ error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off); if (error != 0) return (ENODEV); /* * Finally, check that the endpoint device supports the same version * of ARI that we do. */ ari_ver = pci_read_config(dev, ari_cap_off, 4); if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) { if (bootverbose) device_printf(pcib, "Unsupported version of ARI (%d) detected\n", PCI_EXTCAP_VER(ari_ver)); return (ENXIO); } pcib_enable_ari(sc, pcie_pos); return (0); } int pcib_request_feature_allow(device_t pcib, device_t dev, enum pci_feature feature) { /* * No host firmware we have to negotiate with, so we allow * every valid feature requested. */ switch (feature) { case PCI_FEATURE_AER: case PCI_FEATURE_HP: break; default: return (EINVAL); } return (0); } int pcib_request_feature(device_t dev, enum pci_feature feature) { /* * Invoke PCIB_REQUEST_FEATURE of this bridge first in case * the firmware overrides the method of PCI-PCI bridges. */ return (PCIB_REQUEST_FEATURE(dev, dev, feature)); } /* * Pass the request to use this PCI feature up the tree. Either there's a * firmware like ACPI that's using this feature that will approve (or deny) the * request to take it over, or the platform has no such firmware, in which case * the request will be approved. If the request is approved, the OS is expected * to make use of the feature or render it harmless. */ static int pcib_request_feature_default(device_t pcib, device_t dev, enum pci_feature feature) { device_t bus; /* * Our parent is necessarily a pci bus. Its parent will either be * another pci bridge (which passes it up) or a host bridge that can * approve or reject the request. 
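 *
 * As an illustrative sketch of the resulting call chain: a driver
 * calling pcib_request_feature() on its bridge first invokes
 * PCIB_REQUEST_FEATURE() on that same bridge; unless firmware overrides
 * it, this default method forwards the request to the grandparent
 * bridge, and the climb repeats until a host bridge (an ACPI one, for
 * example) approves or rejects the feature, or
 * pcib_request_feature_allow() finally admits it.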
*/ bus = device_get_parent(pcib); return (PCIB_REQUEST_FEATURE(device_get_parent(bus), dev, feature)); } static int pcib_reset_child(device_t dev, device_t child, int flags) { struct pci_devinfo *pdinfo; int error; error = 0; if (dev == NULL || device_get_parent(child) != dev) goto out; error = ENXIO; if (device_get_devclass(child) != devclass_find("pci")) goto out; pdinfo = device_get_ivars(dev); if (pdinfo->cfg.pcie.pcie_location != 0 && (pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT || pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)) { error = bus_helper_reset_prepare(child, flags); if (error == 0) { error = pcie_link_reset(dev, pdinfo->cfg.pcie.pcie_location); /* XXXKIB call _post even if error != 0 ? */ bus_helper_reset_post(child, flags); } } out: return (error); } diff --git a/sys/dev/pci/pci_private.h b/sys/dev/pci/pci_private.h index b634c7b61260..6bec699bff2b 100644 --- a/sys/dev/pci/pci_private.h +++ b/sys/dev/pci/pci_private.h @@ -1,187 +1,187 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #ifndef _PCI_PRIVATE_H_ #define _PCI_PRIVATE_H_ /* * Export definitions of the pci bus so that we can more easily share * it with "subclass" buses. 
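 *
 * A subclass bus typically inherits from this class; the usual pattern
 * (sketched after sys/dev/acpica/acpi_pci.c) is roughly:
 *
 *	DEFINE_CLASS_1(pci, acpi_pci_driver, acpi_pci_methods,
 *	    sizeof(struct pci_softc), pci_driver);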
*/ DECLARE_CLASS(pci_driver); struct pci_softc { bus_dma_tag_t sc_dma_tag; #ifdef PCI_RES_BUS struct resource *sc_bus; #endif }; extern int pci_do_power_resume; extern int pci_do_power_suspend; void pci_add_children(device_t dev, int domain, int busno); void pci_add_child(device_t bus, struct pci_devinfo *dinfo); device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did); void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask); void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov); struct pci_devinfo *pci_alloc_devinfo_method(device_t dev); int pci_attach(device_t dev); int pci_attach_common(device_t dev); int pci_detach(device_t dev); int pci_rescan_method(device_t dev); void pci_driver_added(device_t dev, driver_t *driver); int pci_ea_is_enabled(device_t dev, int rid); int pci_print_child(device_t dev, device_t child); void pci_probe_nomatch(device_t dev, device_t child); int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep); int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr); int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr); int pci_set_powerstate_method(device_t dev, device_t child, int state); int pci_get_powerstate_method(device_t dev, device_t child); uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width); void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width); int pci_enable_busmaster_method(device_t dev, device_t child); int pci_disable_busmaster_method(device_t dev, device_t child); int pci_enable_io_method(device_t dev, device_t child, int space); int pci_disable_io_method(device_t dev, device_t child, int space); int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_alloc_msi_method(device_t dev, device_t child, int *count); int pci_alloc_msix_method(device_t dev, device_t child, int *count); void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data); void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data); void pci_disable_msi_method(device_t dev, device_t child); int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors); int pci_release_msi_method(device_t dev, device_t child); int pci_msi_count_method(device_t dev, device_t child); int pci_msix_count_method(device_t dev, device_t child); int pci_msix_pba_bar_method(device_t dev, device_t child); int pci_msix_table_bar_method(device_t dev, device_t child); struct resource *pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t 
start, rman_res_t end, rman_res_t count, u_int flags); int pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); -int pci_activate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r); -int pci_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r); +int pci_activate_resource(device_t dev, device_t child, + struct resource *r); +int pci_deactivate_resource(device_t dev, device_t child, + struct resource *r); void pci_delete_resource(device_t dev, device_t child, int type, int rid); struct resource_list *pci_get_resource_list (device_t dev, device_t child); struct pci_devinfo *pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f); void pci_print_verbose(struct pci_devinfo *dinfo); int pci_freecfg(struct pci_devinfo *dinfo); void pci_child_deleted(device_t dev, device_t child); void pci_child_detached(device_t dev, device_t child); int pci_child_location_method(device_t cbdev, device_t child, struct sbuf *sb); int pci_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb); int pci_get_device_path_method(device_t dev, device_t child, const char *locator, struct sbuf *sb); int pci_assign_interrupt_method(device_t dev, device_t child); int pci_resume(device_t dev); int pci_resume_child(device_t dev, device_t child); int pci_suspend_child(device_t dev, device_t child); bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev); void pci_child_added_method(device_t dev, device_t child); /** Restore the config register state. The state must be previously * saved with pci_cfg_save. However, the pci bus driver takes care of * that. This function will also return the device to PCI_POWERSTATE_D0 * if it is currently in a lower power mode. */ void pci_cfg_restore(device_t, struct pci_devinfo *); /** Save the config register state. Optionally set the power state to D3 * if the third argument is non-zero. */ void pci_cfg_save(device_t, struct pci_devinfo *, int); int pci_mapsize(uint64_t testval); void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64); struct pci_map *pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size); struct resource *pci_reserve_map(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int num, u_int flags); struct resource *pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags); int pci_iov_attach_method(device_t bus, device_t dev, struct nvlist *pf_schema, struct nvlist *vf_schema, const char *name); int pci_iov_detach_method(device_t bus, device_t dev); device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did); struct resource *pci_vf_alloc_mem_resource(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pci_vf_release_mem_resource(device_t dev, device_t child, int rid, struct resource *r); #endif /* _PCI_PRIVATE_H_ */ diff --git a/sys/dev/pci/pci_subr.c b/sys/dev/pci/pci_subr.c index 4be3e3f166eb..19081a33f159 100644 --- a/sys/dev/pci/pci_subr.c +++ b/sys/dev/pci/pci_subr.c @@ -1,417 +1,417 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Hudson River Trading LLC * Written by: John H. Baldwin * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Support APIs for Host to PCI bridge drivers and drivers that * provide PCI domains. */ #include #include #include #include #include #include #include #include /* * Try to read the bus number of a host-PCI bridge using appropriate config * registers. */ int host_pcib_get_busno(pci_read_config_fn read_config, int bus, int slot, int func, uint8_t *busnum) { uint32_t id; id = read_config(0, bus, slot, func, PCIR_DEVVENDOR, 4); if (id == 0xffffffff) return (0); switch (id) { case 0x12258086: /* Intel 824?? */ /* XXX This is a guess */ /* *busnum = read_config(0, bus, slot, func, 0x41, 1); */ *busnum = bus; break; case 0x84c48086: /* Intel 82454KX/GX (Orion) */ *busnum = read_config(0, bus, slot, func, 0x4a, 1); break; case 0x84ca8086: /* * For the 450nx chipset, there is a whole bundle of * things pretending to be host bridges. The MIOC will * be seen first and isn't really a pci bridge (the * actual buses are attached to the PXB's). We need to * read the registers of the MIOC to figure out the * bus numbers for the PXB channels. * * Since the MIOC doesn't have a pci bus attached, we * pretend it wasn't there. */ return (0); case 0x84cb8086: switch (slot) { case 0x12: /* Intel 82454NX PXB#0, Bus#A */ *busnum = read_config(0, bus, 0x10, func, 0xd0, 1); break; case 0x13: /* Intel 82454NX PXB#0, Bus#B */ *busnum = read_config(0, bus, 0x10, func, 0xd1, 1) + 1; break; case 0x14: /* Intel 82454NX PXB#1, Bus#A */ *busnum = read_config(0, bus, 0x10, func, 0xd3, 1); break; case 0x15: /* Intel 82454NX PXB#1, Bus#B */ *busnum = read_config(0, bus, 0x10, func, 0xd4, 1) + 1; break; } break; /* ServerWorks -- vendor 0x1166 */ case 0x00051166: case 0x00061166: case 0x00081166: case 0x00091166: case 0x00101166: case 0x00111166: case 0x00171166: case 0x01011166: case 0x010f1014: case 0x01101166: case 0x02011166: case 0x02251166: case 0x03021014: *busnum = read_config(0, bus, slot, func, 0x44, 1); break; /* Compaq/HP -- vendor 0x0e11 */ case 0x60100e11: *busnum = read_config(0, bus, slot, func, 0xc8, 1); break; default: /* Don't know how to read bus number. */ return 0; } return 1; } #ifdef NEW_PCIB /* * Return a pointer to a pretty name for a PCI device. 
If the device * has a driver attached, the device's name is used, otherwise a name * is generated from the device's PCI address. */ const char * pcib_child_name(device_t child) { static char buf[64]; if (device_get_nameunit(child) != NULL) return (device_get_nameunit(child)); snprintf(buf, sizeof(buf), "pci%d:%d:%d:%d", pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (buf); } /* * Some Host-PCI bridge drivers know which resource ranges they can * decode and should only allocate subranges to child PCI devices. * This API provides a way to manage this. The bridge driver should * initialize this structure during attach and call * pcib_host_res_decodes() on each resource range it decodes. It can * then use pcib_host_res_alloc() and pcib_host_res_adjust() as helper * routines for BUS_ALLOC_RESOURCE() and BUS_ADJUST_RESOURCE(). This * API assumes that resources for any decoded ranges can be safely * allocated from the parent via bus_generic_alloc_resource(). */ int pcib_host_res_init(device_t pcib, struct pcib_host_resources *hr) { hr->hr_pcib = pcib; resource_list_init(&hr->hr_rl); return (0); } int pcib_host_res_free(device_t pcib, struct pcib_host_resources *hr) { resource_list_free(&hr->hr_rl); return (0); } int pcib_host_res_decodes(struct pcib_host_resources *hr, int type, rman_res_t start, rman_res_t end, u_int flags) { struct resource_list_entry *rle; int rid; if (bootverbose) device_printf(hr->hr_pcib, "decoding %d %srange %#jx-%#jx\n", type, flags & RF_PREFETCHABLE ? "prefetchable ": "", start, end); rid = resource_list_add_next(&hr->hr_rl, type, start, end, end - start + 1); if (flags & RF_PREFETCHABLE) { KASSERT(type == SYS_RES_MEMORY, ("only memory is prefetchable")); rle = resource_list_find(&hr->hr_rl, type, rid); rle->flags = RLE_PREFETCH; } return (0); } struct resource * pcib_host_res_alloc(struct pcib_host_resources *hr, device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle; struct resource *r; rman_res_t new_start, new_end; if (flags & RF_PREFETCHABLE) KASSERT(type == SYS_RES_MEMORY, ("only memory is prefetchable")); rle = resource_list_find(&hr->hr_rl, type, 0); if (rle == NULL) { /* * No decoding ranges for this resource type, just pass * the request up to the parent. */ return (bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid, start, end, count, flags)); } restart: /* Try to allocate from each decoded range. */ for (; rle != NULL; rle = STAILQ_NEXT(rle, link)) { if (rle->type != type) continue; if (((flags & RF_PREFETCHABLE) != 0) != ((rle->flags & RLE_PREFETCH) != 0)) continue; new_start = ummax(start, rle->start); new_end = ummin(end, rle->end); if (new_start > new_end || new_start + count - 1 > new_end || new_start + count < new_start) continue; r = bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid, new_start, new_end, count, flags); if (r != NULL) { if (bootverbose) device_printf(hr->hr_pcib, "allocated type %d (%#jx-%#jx) for rid %x of %s\n", type, rman_get_start(r), rman_get_end(r), *rid, pcib_child_name(dev)); return (r); } } /* * If we failed to find a prefetch range for a memory * resource, try again without prefetch. 
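 * That fallback is safe because a prefetchable BAR may always be placed
 * in a non-prefetchable window; only the converse is unsafe.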
*/ if (flags & RF_PREFETCHABLE) { flags &= ~RF_PREFETCHABLE; rle = resource_list_find(&hr->hr_rl, type, 0); goto restart; } return (NULL); } int pcib_host_res_adjust(struct pcib_host_resources *hr, device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { struct resource_list_entry *rle; rle = resource_list_find(&hr->hr_rl, rman_get_type(r), 0); if (rle == NULL) { /* * No decoding ranges for this resource type, just pass * the request up to the parent. */ return (bus_generic_adjust_resource(hr->hr_pcib, dev, r, start, end)); } /* Only allow adjustments that stay within a decoded range. */ for (; rle != NULL; rle = STAILQ_NEXT(rle, link)) { if (rle->start <= start && rle->end >= end) return (bus_generic_adjust_resource(hr->hr_pcib, dev, r, start, end)); } return (ERANGE); } #ifdef PCI_RES_BUS struct pci_domain { int pd_domain; struct rman pd_bus_rman; TAILQ_ENTRY(pci_domain) pd_link; }; static TAILQ_HEAD(, pci_domain) domains = TAILQ_HEAD_INITIALIZER(domains); /* * Each PCI domain maintains its own resource manager for PCI bus * numbers in that domain. Domain objects are created on first use. * Host to PCI bridge drivers and PCI-PCI bridge drivers should * allocate their bus ranges from their domain. */ static struct pci_domain * pci_find_domain(int domain) { struct pci_domain *d; char buf[64]; int error; TAILQ_FOREACH(d, &domains, pd_link) { if (d->pd_domain == domain) return (d); } snprintf(buf, sizeof(buf), "PCI domain %d bus numbers", domain); d = malloc(sizeof(*d) + strlen(buf) + 1, M_DEVBUF, M_WAITOK | M_ZERO); d->pd_domain = domain; d->pd_bus_rman.rm_start = 0; d->pd_bus_rman.rm_end = PCI_BUSMAX; d->pd_bus_rman.rm_type = RMAN_ARRAY; strcpy((char *)(d + 1), buf); d->pd_bus_rman.rm_descr = (char *)(d + 1); error = rman_init(&d->pd_bus_rman); if (error == 0) error = rman_manage_region(&d->pd_bus_rman, 0, PCI_BUSMAX); if (error) panic("Failed to initialize PCI domain %d rman", domain); TAILQ_INSERT_TAIL(&domains, d, pd_link); return (d); } struct resource * pci_domain_alloc_bus(int domain, device_t dev, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_domain *d; struct resource *res; if (domain < 0 || domain > PCI_DOMAINMAX) return (NULL); d = pci_find_domain(domain); res = rman_reserve_resource(&d->pd_bus_rman, start, end, count, flags, dev); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_type(res, PCI_RES_BUS); return (res); } int pci_domain_adjust_bus(int domain, device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_adjust_resource(r, start, end)); } int pci_domain_release_bus(int domain, device_t dev, int rid, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_release_resource(r)); } int -pci_domain_activate_bus(int domain, device_t dev, int rid, struct resource *r) +pci_domain_activate_bus(int domain, device_t dev, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad 
resource")); #endif return (rman_activate_resource(r)); } int -pci_domain_deactivate_bus(int domain, device_t dev, int rid, struct resource *r) +pci_domain_deactivate_bus(int domain, device_t dev, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_deactivate_resource(r)); } #endif /* PCI_RES_BUS */ #endif /* NEW_PCIB */ diff --git a/sys/dev/pci/pcib_private.h b/sys/dev/pci/pcib_private.h index 1f4f18d921e5..c95321586ac2 100644 --- a/sys/dev/pci/pcib_private.h +++ b/sys/dev/pci/pcib_private.h @@ -1,199 +1,199 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __PCIB_PRIVATE_H__ #define __PCIB_PRIVATE_H__ #include #ifdef NEW_PCIB /* * Data structure and routines that Host to PCI bridge drivers can use * to restrict allocations for child devices to ranges decoded by the * bridge. */ struct pcib_host_resources { device_t hr_pcib; struct resource_list hr_rl; }; int pcib_host_res_init(device_t pcib, struct pcib_host_resources *hr); int pcib_host_res_free(device_t pcib, struct pcib_host_resources *hr); int pcib_host_res_decodes(struct pcib_host_resources *hr, int type, rman_res_t start, rman_res_t end, u_int flags); struct resource *pcib_host_res_alloc(struct pcib_host_resources *hr, device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pcib_host_res_adjust(struct pcib_host_resources *hr, device_t dev, struct resource *r, rman_res_t start, rman_res_t end); #endif /* * Export portions of generic PCI:PCI bridge support so that it can be * used by subclasses. 
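 * (The vmd(4) driver later in this change is one such consumer: it
 * reuses pcib_read_ivar(), pcib_write_ivar() and pcib_maxslots() while
 * declaring its own class with DEFINE_CLASS_0().)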
*/ DECLARE_CLASS(pcib_driver); #ifdef NEW_PCIB #define WIN_IO 0x1 #define WIN_MEM 0x2 #define WIN_PMEM 0x4 struct pcib_window { pci_addr_t base; /* base address */ pci_addr_t limit; /* topmost address */ struct rman rman; struct resource **res; int count; /* size of 'res' array */ int reg; /* resource id from parent */ int valid; int mask; /* WIN_* bitmask of this window */ int step; /* log_2 of window granularity */ const char *name; }; #endif struct pcib_secbus { u_int sec; u_int sub; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) device_t dev; struct rman rman; struct resource *res; const char *name; int sub_reg; #endif }; /* * Bridge-specific data. */ struct pcib_softc { device_t dev; device_t child; uint32_t flags; /* flags */ #define PCIB_SUBTRACTIVE 0x1 #define PCIB_DISABLE_MSI 0x2 #define PCIB_DISABLE_MSIX 0x4 #define PCIB_ENABLE_ARI 0x8 #define PCIB_HOTPLUG 0x10 #define PCIB_HOTPLUG_CMD_PENDING 0x20 #define PCIB_DETACH_PENDING 0x40 #define PCIB_DETACHING 0x80 u_int domain; /* domain number */ u_int pribus; /* primary bus number */ struct pcib_secbus bus; /* secondary bus numbers */ #ifdef NEW_PCIB struct pcib_window io; /* I/O port window */ struct pcib_window mem; /* memory window */ struct pcib_window pmem; /* prefetchable memory window */ #else pci_addr_t pmembase; /* base address of prefetchable memory */ pci_addr_t pmemlimit; /* topmost address of prefetchable memory */ pci_addr_t membase; /* base address of memory window */ pci_addr_t memlimit; /* topmost address of memory window */ uint32_t iobase; /* base address of port window */ uint32_t iolimit; /* topmost address of port window */ #endif uint16_t bridgectl; /* bridge control register */ uint16_t pcie_link_sta; uint16_t pcie_slot_sta; uint32_t pcie_slot_cap; struct resource *pcie_mem; struct resource *pcie_irq; void *pcie_ihand; struct task pcie_hp_task; struct timeout_task pcie_ab_task; struct timeout_task pcie_cc_task; struct timeout_task pcie_dll_task; struct mtx *pcie_hp_lock; }; #define PCIB_HP_LOCK(sc) mtx_lock((sc)->pcie_hp_lock) #define PCIB_HP_UNLOCK(sc) mtx_unlock((sc)->pcie_hp_lock) #define PCIB_HP_LOCK_ASSERT(sc) mtx_assert((sc)->pcie_hp_lock, MA_OWNED) #define PCIB_SUPPORTED_ARI_VER 1 typedef uint32_t pci_read_config_fn(int d, int b, int s, int f, int reg, int width); int host_pcib_get_busno(pci_read_config_fn read_config, int bus, int slot, int func, uint8_t *busnum); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct resource *pci_domain_alloc_bus(int domain, device_t dev, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pci_domain_adjust_bus(int domain, device_t dev, struct resource *r, rman_res_t start, rman_res_t end); int pci_domain_release_bus(int domain, device_t dev, int rid, struct resource *r); -int pci_domain_activate_bus(int domain, device_t dev, int rid, +int pci_domain_activate_bus(int domain, device_t dev, struct resource *r); -int pci_domain_deactivate_bus(int domain, device_t dev, int rid, +int pci_domain_deactivate_bus(int domain, device_t dev, struct resource *r); struct resource *pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); void pcib_free_secbus(device_t dev, struct pcib_secbus *bus); void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count); #endif int pcib_attach(device_t dev); int pcib_attach_child(device_t dev); void pcib_attach_common(device_t dev); void pcib_bridge_init(device_t dev); #ifdef NEW_PCIB const char *pcib_child_name(device_t 
child); #endif int pcib_detach(device_t dev); int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int pcib_maxslots(device_t dev); int pcib_maxfuncs(device_t dev); int pcib_route_interrupt(device_t pcib, device_t dev, int pin); int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs); int pcib_alloc_msix(device_t pcib, device_t dev, int *irq); int pcib_release_msix(device_t pcib, device_t dev, int irq); int pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); int pcib_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id); void pcib_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func); int pcib_request_feature(device_t dev, enum pci_feature feature); int pcib_request_feature_allow(device_t pcib, device_t dev, enum pci_feature feature); #endif diff --git a/sys/dev/vmd/vmd.c b/sys/dev/vmd/vmd.c index a0a021c7d367..904e615258a6 100644 --- a/sys/dev/vmd/vmd.c +++ b/sys/dev/vmd/vmd.c @@ -1,759 +1,755 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alexander Motin * Copyright 2019 Cisco Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" struct vmd_type { u_int16_t vmd_vid; u_int16_t vmd_did; char *vmd_name; int flags; #define BUS_RESTRICT 1 #define VECTOR_OFFSET 2 #define CAN_BYPASS_MSI 4 }; #define VMD_CAP 0x40 #define VMD_BUS_RESTRICT 0x1 #define VMD_CONFIG 0x44 #define VMD_BYPASS_MSI 0x2 #define VMD_BUS_START(x) ((x >> 8) & 0x3) #define VMD_LOCK 0x70 SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Intel Volume Management Device tuning parameters"); /* * By default all VMD devices remap children MSI/MSI-X interrupts into their * own. It creates additional isolation, but also complicates things due to * sharing, etc. Fortunately some VMD devices can bypass the remapping. 
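 * The bypass is controlled through the VMD_BYPASS_MSI bit of the
 * VMD_CONFIG register (see vmd_set_msi_bypass() below) and is only
 * attempted on parts carrying the CAN_BYPASS_MSI flag in vmd_devs[].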
*/ static int vmd_bypass_msi = 1; SYSCTL_INT(_hw_vmd, OID_AUTO, bypass_msi, CTLFLAG_RWTUN, &vmd_bypass_msi, 0, "Bypass MSI remapping on capable hardware"); /* * All MSIs within a group share an address, so VMD can't distinguish them. * It makes no sense to use more than one per device unless required by * a specific device driver. */ static int vmd_max_msi = 1; SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0, "Maximum number of MSI vectors per device"); /* * MSI-X can use different addresses, but we have a limited number of MSI-X * vectors we can route to, so use a conservative default to try to avoid * sharing. */ static int vmd_max_msix = 3; SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0, "Maximum number of MSI-X vectors per device"); static struct vmd_type vmd_devs[] = { { 0x8086, 0x201d, "Intel Volume Management Device", 0 }, { 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT | CAN_BYPASS_MSI }, { 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x7d0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0xa77f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0xad0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0, 0, NULL, 0 } }; static int vmd_probe(device_t dev) { struct vmd_type *t; uint16_t vid, did; vid = pci_get_vendor(dev); did = pci_get_device(dev); for (t = vmd_devs; t->vmd_name != NULL; t++) { if (vid == t->vmd_vid && did == t->vmd_did) { device_set_desc(dev, t->vmd_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void vmd_free(struct vmd_softc *sc) { struct vmd_irq *vi; struct vmd_irq_user *u; int i; if (sc->psc.bus.rman.rm_end != 0) rman_fini(&sc->psc.bus.rman); if (sc->psc.mem.rman.rm_end != 0) rman_fini(&sc->psc.mem.rman); while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) { LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); } if (sc->vmd_irq != NULL) { for (i = 0; i < sc->vmd_msix_count; i++) { vi = &sc->vmd_irq[i]; if (vi->vi_res == NULL) continue; bus_teardown_intr(sc->psc.dev, vi->vi_res, vi->vi_handle); bus_release_resource(sc->psc.dev, SYS_RES_IRQ, vi->vi_rid, vi->vi_res); } } free(sc->vmd_irq, M_DEVBUF); sc->vmd_irq = NULL; pci_release_msi(sc->psc.dev); for (i = 0; i < VMD_MAX_BAR; i++) { if (sc->vmd_regs_res[i] != NULL) bus_release_resource(sc->psc.dev, SYS_RES_MEMORY, sc->vmd_regs_rid[i], sc->vmd_regs_res[i]); } } /* The hidden PCI roots are accessed through BAR(0).
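 * The layout gives each bus 1 MB, each slot 32 kB and each function 4 kB
 * of config space, as the shifts in vmd_read_config() below show. For
 * example, register 0x10 of bus vmd_bus_start + 1, slot 0, function 1
 * lives at BAR(0) offset (1 << 20) + (1 << 12) + 0x10 = 0x101010.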
*/ static uint32_t vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { struct vmd_softc *sc; bus_addr_t offset; sc = device_get_softc(dev); if (b < sc->vmd_bus_start || b > sc->vmd_bus_end) return (0xffffffff); offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg; switch (width) { case 4: return (bus_read_4(sc->vmd_regs_res[0], offset)); case 2: return (bus_read_2(sc->vmd_regs_res[0], offset)); case 1: return (bus_read_1(sc->vmd_regs_res[0], offset)); default: __assert_unreachable(); return (0xffffffff); } } static void vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { struct vmd_softc *sc; bus_addr_t offset; sc = device_get_softc(dev); if (b < sc->vmd_bus_start || b > sc->vmd_bus_end) return; offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg; switch (width) { case 4: return (bus_write_4(sc->vmd_regs_res[0], offset, val)); case 2: return (bus_write_2(sc->vmd_regs_res[0], offset, val)); case 1: return (bus_write_1(sc->vmd_regs_res[0], offset, val)); default: __assert_unreachable(); } } static void vmd_set_msi_bypass(device_t dev, bool enable) { uint16_t val; val = pci_read_config(dev, VMD_CONFIG, 2); if (enable) val |= VMD_BYPASS_MSI; else val &= ~VMD_BYPASS_MSI; pci_write_config(dev, VMD_CONFIG, val, 2); } static int vmd_intr(void *arg) { /* * We have nothing to do here, but we have to register some interrupt * handler to make PCI code setup and enable the MSI-X vector. */ return (FILTER_STRAY); } static int vmd_attach(device_t dev) { struct vmd_softc *sc; struct pcib_secbus *bus; struct pcib_window *w; struct vmd_type *t; struct vmd_irq *vi; uint16_t vid, did; uint32_t bar; int i, j, error; char buf[64]; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->psc.dev = dev; sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev); pci_enable_busmaster(dev); for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) { sc->vmd_regs_rid[i] = PCIR_BAR(j); bar = pci_read_config(dev, PCIR_BAR(0), 4); if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) j++; if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate resources\n"); goto fail; } } vid = pci_get_vendor(dev); did = pci_get_device(dev); for (t = vmd_devs; t->vmd_name != NULL; t++) { if (vid == t->vmd_vid && did == t->vmd_did) break; } sc->vmd_bus_start = 0; if ((t->flags & BUS_RESTRICT) && (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) { switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) { case 0: sc->vmd_bus_start = 0; break; case 1: sc->vmd_bus_start = 128; break; case 2: sc->vmd_bus_start = 224; break; default: device_printf(dev, "Unknown bus offset\n"); goto fail; } } sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start + (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1); bus = &sc->psc.bus; bus->sec = sc->vmd_bus_start; bus->sub = sc->vmd_bus_end; bus->dev = dev; bus->rman.rm_start = 0; bus->rman.rm_end = PCI_BUSMAX; bus->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev)); bus->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&bus->rman); if (error) { device_printf(dev, "Failed to initialize bus rman\n"); bus->rman.rm_end = 0; goto fail; } error = rman_manage_region(&bus->rman, sc->vmd_bus_start, sc->vmd_bus_end); if (error) { device_printf(dev, "Failed to add resource to bus rman\n"); goto fail; } w = &sc->psc.mem; w->rman.rm_type = RMAN_ARRAY; 
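/*
 * The memory window rman below serves child BAR allocations out of
 * MEMBAR1 and MEMBAR2; note that the first 0x2000 bytes of MEMBAR2
 * are deliberately held back and never offered to children.
 */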
snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev)); w->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&w->rman); if (error) { device_printf(dev, "Failed to initialize memory rman\n"); w->rman.rm_end = 0; goto fail; } error = rman_manage_region(&w->rman, rman_get_start(sc->vmd_regs_res[1]), rman_get_end(sc->vmd_regs_res[1])); if (error) { device_printf(dev, "Failed to add resource to memory rman\n"); goto fail; } error = rman_manage_region(&w->rman, rman_get_start(sc->vmd_regs_res[2]) + 0x2000, rman_get_end(sc->vmd_regs_res[2])); if (error) { device_printf(dev, "Failed to add resource to memory rman\n"); goto fail; } LIST_INIT(&sc->vmd_users); sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 1 : 0; sc->vmd_msix_count = pci_msix_count(dev); if (vmd_bypass_msi && (t->flags & CAN_BYPASS_MSI)) { sc->vmd_msix_count = 0; vmd_set_msi_bypass(dev, true); } else if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) { sc->vmd_irq = malloc(sizeof(struct vmd_irq) * sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < sc->vmd_msix_count; i++) { vi = &sc->vmd_irq[i]; vi->vi_rid = i + 1; vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE); if (vi->vi_res == NULL) { device_printf(dev, "Failed to allocate irq\n"); goto fail; } vi->vi_irq = rman_get_start(vi->vi_res); if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC | INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) { device_printf(dev, "Can't set up interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, vi->vi_rid, vi->vi_res); vi->vi_res = NULL; goto fail; } } vmd_set_msi_bypass(dev, false); } sc->vmd_dma_tag = bus_get_dma_tag(dev); sc->psc.child = device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); fail: vmd_free(sc); return (ENXIO); } static int vmd_detach(device_t dev) { struct vmd_softc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error) return (error); error = device_delete_children(dev); if (error) return (error); if (sc->vmd_msix_count == 0) vmd_set_msi_bypass(dev, false); vmd_free(sc); return (0); } static bus_dma_tag_t vmd_get_dma_tag(device_t dev, device_t child) { struct vmd_softc *sc = device_get_softc(dev); return (sc->vmd_dma_tag); } static struct rman * vmd_get_rman(device_t dev, int type, u_int flags) { struct vmd_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_MEMORY: return (&sc->psc.mem.rman); case PCI_RES_BUS: return (&sc->psc.bus.rman); default: /* VMD hardware does not support I/O ports. */ return (NULL); } } static struct resource * vmd_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (type == SYS_RES_IRQ) { /* VMD hardware does not support legacy interrupts. 
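 * Resource ID 0 is the legacy INTx interrupt, hence the refusal below;
 * MSI and MSI-X vectors are allocated with rids starting at 1.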
*/ if (*rid == 0) return (NULL); return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags | RF_SHAREABLE)); } res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (bootverbose && res != NULL) { switch (type) { case SYS_RES_MEMORY: device_printf(dev, "allocated memory range (%#jx-%#jx) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); break; case PCI_RES_BUS: device_printf(dev, "allocated bus range (%ju-%ju) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); break; } } return (res); } static int vmd_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (rman_get_type(r) == SYS_RES_IRQ) { return (bus_generic_adjust_resource(dev, child, r, start, end)); } return (bus_generic_rman_adjust_resource(dev, child, r, start, end)); } static int vmd_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_IRQ) { return (bus_generic_release_resource(dev, child, type, rid, r)); } return (bus_generic_rman_release_resource(dev, child, type, rid, r)); } static int -vmd_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +vmd_activate_resource(device_t dev, device_t child, struct resource *r) { - if (type == SYS_RES_IRQ) { - return (bus_generic_activate_resource(dev, child, type, rid, - r)); + if (rman_get_type(r) == SYS_RES_IRQ) { + return (bus_generic_activate_resource(dev, child, r)); } - return (bus_generic_rman_activate_resource(dev, child, type, rid, r)); + return (bus_generic_rman_activate_resource(dev, child, r)); } static int -vmd_deactivate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +vmd_deactivate_resource(device_t dev, device_t child, struct resource *r) { - if (type == SYS_RES_IRQ) { - return (bus_generic_deactivate_resource(dev, child, type, rid, - r)); + if (rman_get_type(r) == SYS_RES_IRQ) { + return (bus_generic_deactivate_resource(dev, child, r)); } - return (bus_generic_rman_deactivate_resource(dev, child, type, rid, r)); + return (bus_generic_rman_deactivate_resource(dev, child, r)); } static struct resource * vmd_find_parent_resource(struct vmd_softc *sc, struct resource *r) { for (int i = 1; i < 3; i++) { if (rman_get_start(sc->vmd_regs_res[i]) <= rman_get_start(r) && rman_get_end(sc->vmd_regs_res[i]) >= rman_get_end(r)) return (sc->vmd_regs_res[i]); } return (NULL); } static int vmd_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct vmd_softc *sc = device_get_softc(dev); struct resource_map_request args; struct resource *pres; rman_res_t length, start; int error; /* Resources must be active to be mapped. 
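 *
 * A child driver normally reaches this method through bus_map_resource(9),
 * roughly along these lines (a sketch, not taken from a real driver):
 *
 *	struct resource_map map;
 *	struct resource_map_request req;
 *
 *	resource_init_map_request(&req);
 *	req.memattr = VM_MEMATTR_WRITE_COMBINING;
 *	error = bus_map_resource(dev, res, &req, &map);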
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); pres = vmd_find_parent_resource(sc, r); if (pres == NULL) return (ENOENT); args.offset = start - rman_get_start(pres); args.length = length; return (bus_generic_map_resource(dev, child, pres, &args, map)); } static int vmd_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { struct vmd_softc *sc = device_get_softc(dev); r = vmd_find_parent_resource(sc, r); if (r == NULL) return (ENOENT); return (bus_generic_unmap_resource(dev, child, r, map)); } static int vmd_route_interrupt(device_t dev, device_t child, int pin) { /* VMD hardware does not support legacy interrupts. */ return (PCI_INVALID_IRQ); } static int vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount, int *irqs) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; int i, ibest = 0, best = INT_MAX; if (sc->vmd_msix_count == 0) { return (PCIB_ALLOC_MSI(device_get_parent(device_get_parent(dev)), child, count, maxcount, irqs)); } if (count > vmd_max_msi) return (ENOSPC); LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) return (EBUSY); } for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (best > sc->vmd_irq[i].vi_nusers) { best = sc->vmd_irq[i].vi_nusers; ibest = i; } } u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO); u->viu_child = child; u->viu_vector = ibest; LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link); sc->vmd_irq[ibest].vi_nusers += count; for (i = 0; i < count; i++) irqs[i] = sc->vmd_irq[ibest].vi_irq; return (0); } static int vmd_release_msi(device_t dev, device_t child, int count, int *irqs) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; if (sc->vmd_msix_count == 0) { return (PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)), child, count, irqs)); } LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) { sc->vmd_irq[u->viu_vector].vi_nusers -= count; LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); return (0); } } return (EINVAL); } static int vmd_alloc_msix(device_t dev, device_t child, int *irq) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; int i, ibest = 0, best = INT_MAX; if (sc->vmd_msix_count == 0) { return (PCIB_ALLOC_MSIX(device_get_parent(device_get_parent(dev)), child, irq)); } i = 0; LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) i++; } if (i >= vmd_max_msix) return (ENOSPC); for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (best > sc->vmd_irq[i].vi_nusers) { best = sc->vmd_irq[i].vi_nusers; ibest = i; } } u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO); u->viu_child = child; u->viu_vector = ibest; LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link); sc->vmd_irq[ibest].vi_nusers++; *irq = sc->vmd_irq[ibest].vi_irq; return (0); } static int vmd_release_msix(device_t dev, device_t child, int irq) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; if (sc->vmd_msix_count == 0) { return (PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)), child, irq)); } LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child && sc->vmd_irq[u->viu_vector].vi_irq == irq) { sc->vmd_irq[u->viu_vector].vi_nusers--; LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); return (0); } } return (EINVAL); } static int vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { 
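	/*
	 * With remapping active every child interrupt is backed by one of
	 * the VMD's own MSI-X vectors: the address computed below encodes
	 * the chosen vector index under MSI_INTEL_ADDR_BASE and the data
	 * payload is always 0.
	 */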
struct vmd_softc *sc = device_get_softc(dev); int i; if (sc->vmd_msix_count == 0) { return (PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)), child, irq, addr, data)); } for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (sc->vmd_irq[i].vi_irq == irq) break; } if (i >= sc->vmd_msix_count) return (EINVAL); *addr = MSI_INTEL_ADDR_BASE | (i << 12); *data = 0; return (0); } static device_method_t vmd_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vmd_probe), DEVMETHOD(device_attach, vmd_attach), DEVMETHOD(device_detach, vmd_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_get_dma_tag, vmd_get_dma_tag), DEVMETHOD(bus_get_rman, vmd_get_rman), DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, vmd_alloc_resource), DEVMETHOD(bus_adjust_resource, vmd_adjust_resource), DEVMETHOD(bus_release_resource, vmd_release_resource), DEVMETHOD(bus_activate_resource, vmd_activate_resource), DEVMETHOD(bus_deactivate_resource, vmd_deactivate_resource), DEVMETHOD(bus_map_resource, vmd_map_resource), DEVMETHOD(bus_unmap_resource, vmd_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, vmd_read_config), DEVMETHOD(pcib_write_config, vmd_write_config), DEVMETHOD(pcib_route_interrupt, vmd_route_interrupt), DEVMETHOD(pcib_alloc_msi, vmd_alloc_msi), DEVMETHOD(pcib_release_msi, vmd_release_msi), DEVMETHOD(pcib_alloc_msix, vmd_alloc_msix), DEVMETHOD(pcib_release_msix, vmd_release_msix), DEVMETHOD(pcib_map_msi, vmd_map_msi), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc)); DRIVER_MODULE(vmd, pci, vmd_pci_driver, NULL, NULL); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd, vmd_devs, nitems(vmd_devs) - 1); diff --git a/sys/kern/bus_if.m b/sys/kern/bus_if.m index 375aeebd1835..26745be6eb34 100644 --- a/sys/kern/bus_if.m +++ b/sys/kern/bus_if.m @@ -1,979 +1,971 @@ #- # Copyright (c) 1998-2004 Doug Rabson # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # #include #include #include /** * @defgroup BUS bus - KObj methods for drivers of devices with children * @brief A set of methods required by device drivers that support * child devices. * @{ */ INTERFACE bus; # # Default implementations of some methods. # CODE { static struct resource * null_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { return (0); } static int null_remap_intr(device_t bus, device_t dev, u_int irq) { if (dev != NULL) return (BUS_REMAP_INTR(dev, NULL, irq)); return (ENXIO); } static device_t null_add_child(device_t bus, int order, const char *name, int unit) { panic("bus_add_child is not implemented"); } static int null_reset_post(device_t bus, device_t dev) { return (0); } static int null_reset_prepare(device_t bus, device_t dev) { return (0); } static struct rman * null_get_rman(device_t bus, int type, u_int flags) { return (NULL); } }; /** * @brief Print a description of a child device * * This is called from system code which prints out a description of a * device. It should describe the attachment that the child has with * the parent. For instance, the TurboLaser bus prints which node the * device is attached to. See bus_generic_print_child() for more * information. * * @param _dev the device whose child is being printed * @param _child the child device to describe * * @returns the number of characters output. */ METHOD int print_child { device_t _dev; device_t _child; } DEFAULT bus_generic_print_child; /** * @brief Print a notification about an unprobed child device. * * Called for each child device that did not succeed in probing for a * driver. * * @param _dev the device whose child was being probed * @param _child the child device which failed to probe */ METHOD void probe_nomatch { device_t _dev; device_t _child; }; /** * @brief Read the value of a bus-specific attribute of a device * * This method, along with BUS_WRITE_IVAR(), manages a bus-specific set * of instance variables of a child device. The intention is that * each different type of bus defines a set of appropriate instance * variables (such as ports and irqs for the ISA bus, etc.) * * This information could be given to the child device as a struct, but * that makes it hard for a bus to add or remove variables without * forcing an edit and recompile for all drivers, which may not be * possible for vendor-supplied binary drivers. * * This method copies the value of an instance variable to the * location specified by @p *_result.
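 *
 * Bus front ends normally hide this method behind typed accessors
 * generated by __BUS_ACCESSOR(); sketched after the usual pattern
 * (assuming the pci names as found in pcivar.h):
 *
 *	#define PCI_ACCESSOR(var, ivar, type)				\
 *		__BUS_ACCESSOR(pci, var, PCI, ivar, type)
 *	PCI_ACCESSOR(bus, BUS, uint8_t)
 *
 * which expands to a pci_get_bus() wrapper around BUS_READ_IVAR() with
 * PCI_IVAR_BUS.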
* * @param _dev the device whose child was being examined * @param _child the child device whose instance variable is * being read * @param _index the instance variable to read * @param _result a location to receive the instance variable * value * * @retval 0 success * @retval ENOENT no such instance variable is supported by @p * _dev */ METHOD int read_ivar { device_t _dev; device_t _child; int _index; uintptr_t *_result; }; /** * @brief Write the value of a bus-specific attribute of a device * * This method sets the value of an instance variable to @p _value. * * @param _dev the device whose child was being updated * @param _child the child device whose instance variable is * being written * @param _index the instance variable to write * @param _value the value to write to that instance variable * * @retval 0 success * @retval ENOENT no such instance variable is supported by @p * _dev * @retval EINVAL the instance variable was recognised but * contains a read-only value */ METHOD int write_ivar { device_t _dev; device_t _child; int _index; uintptr_t _value; }; /** * @brief Notify a bus that a child was deleted * * Called at the beginning of device_delete_child() to allow the parent * to tear down any bus-specific state for the child. * * @param _dev the device whose child is being deleted * @param _child the child device which is being deleted */ METHOD void child_deleted { device_t _dev; device_t _child; }; /** * @brief Notify a bus that a child was detached * * Called after the child's DEVICE_DETACH() method to allow the parent * to reclaim any resources allocated on behalf of the child. * * @param _dev the device whose child changed state * @param _child the child device which changed state */ METHOD void child_detached { device_t _dev; device_t _child; }; /** * @brief Notify a bus that a new driver was added * * Called when a new driver is added to the devclass which owns this * bus. The generic implementation of this method attempts to probe and * attach any unmatched children of the bus. * * @param _dev the device whose devclass had a new driver * added to it * @param _driver the new driver which was added */ METHOD void driver_added { device_t _dev; driver_t *_driver; } DEFAULT bus_generic_driver_added; /** * @brief Create a new child device * * For buses which use drivers supporting DEVICE_IDENTIFY() to * enumerate their devices, this method is used to create new * device instances. The new device will be added after the last * existing child with the same order. Implementations of bus_add_child * call device_add_child_ordered to add the child and often add * a suitable ivar to the device specific to that bus. * * @param _dev the bus device which will be the parent of the * new child device * @param _order a value which is used to partially sort the * children of @p _dev - devices created using * lower values of @p _order appear first in @p * _dev's list of children * @param _name devclass name for new device or @c NULL if not * specified * @param _unit unit number for new device or @c -1 if not * specified */ METHOD device_t add_child { device_t _dev; u_int _order; const char *_name; int _unit; } DEFAULT null_add_child; /** * @brief Rescan the bus * * This method is called by a parent bridge or devctl to trigger a bus * rescan. The rescan should delete devices no longer present and * enumerate devices that have newly arrived.
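 *
 * A sketch of the expected shape (the foobus_* helpers are
 * hypothetical; bus_generic_attach() is the stock way to attach any
 * newly added children):
 * @code
 * static int
 * foobus_rescan(device_t dev)
 * {
 *         foobus_delete_departed_children(dev);
 *         foobus_add_arrived_children(dev);
 *         return (bus_generic_attach(dev));
 * }
 * @endcode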
* * @param _dev the bus device */ METHOD int rescan { device_t _dev; } DEFAULT bus_null_rescan; /** * @brief Allocate a system resource * * This method is called by child devices of a bus to allocate resources. * The types are defined in <machine/resource.h>; the meaning of the * resource-ID field varies from bus to bus (but @p *rid == 0 is always * valid if the resource type is). If a resource was allocated and the * caller did not pass RF_ACTIVE to specify that it should be * activated immediately, the caller is responsible for calling * BUS_ACTIVATE_RESOURCE() when it actually uses the resource. * * @param _dev the parent device of @p _child * @param _child the device which is requesting an allocation * @param _type the type of resource to allocate * @param _rid a pointer to the resource identifier * @param _start hint at the start of the resource range - pass * @c 0 for any start address * @param _end hint at the end of the resource range - pass * @c ~0 for any end address * @param _count hint at the size of range required - pass @c 1 * for any size * @param _flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * <sys/rman.h> for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ METHOD struct resource * alloc_resource { device_t _dev; device_t _child; int _type; int *_rid; rman_res_t _start; rman_res_t _end; rman_res_t _count; u_int _flags; } DEFAULT null_alloc_resource; /** * @brief Activate a resource * * Activate a resource previously allocated with * BUS_ALLOC_RESOURCE(). For instance, this may enable decoding of this * resource in a device. It will also establish a mapping for the resource * unless RF_UNMAPPED was set when allocating the resource. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource - * @param _type the type of resource - * @param _rid the resource identifier * @param _r the resource to activate */ METHOD int activate_resource { device_t _dev; device_t _child; - int _type; - int _rid; struct resource *_r; }; /** * @brief Map a resource * * Allocate a mapping for a range of an active resource. The mapping * is described by a struct resource_map object. This may, for instance, * map a memory region into the kernel's virtual address space. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _r the resource to map * @param _args optional attributes of the mapping * @param _map the mapping */ METHOD int map_resource { device_t _dev; device_t _child; struct resource *_r; struct resource_map_request *_args; struct resource_map *_map; } DEFAULT bus_generic_map_resource; /** * @brief Unmap a resource * * Release a mapping previously allocated with * BUS_MAP_RESOURCE(). This may, for instance, unmap a memory region * from the kernel's virtual address space. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _r the resource * @param _map the mapping to release */ METHOD int unmap_resource { device_t _dev; device_t _child; struct resource *_r; struct resource_map *_map; } DEFAULT bus_generic_unmap_resource; /** * @brief Deactivate a resource * * Deactivate a resource previously allocated with * BUS_ALLOC_RESOURCE().
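 *
 * For illustration, the usual driver-side lifecycle that reaches this
 * method and its activate counterpart, using the two-argument
 * bus_activate_resource()/bus_deactivate_resource() wrappers
 * introduced by this change (a sketch; error handling abbreviated):
 * @code
 * struct resource *res;
 * int rid = 0, error;
 *
 * res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
 * if (res == NULL)
 *         return (ENXIO);
 * error = bus_activate_resource(dev, res);
 * if (error == 0)
 *         (void)bus_read_4(res, 0);
 * bus_deactivate_resource(dev, res);
 * bus_release_resource(dev, SYS_RES_MEMORY, rid, res);
 * @endcode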
* * @param _dev the parent device of @p _child * @param _child the device which allocated the resource - * @param _type the type of resource - * @param _rid the resource identifier * @param _r the resource to deactivate */ METHOD int deactivate_resource { device_t _dev; device_t _child; - int _type; - int _rid; struct resource *_r; }; /** * @brief Adjust a resource * * Adjust the start and/or end of a resource allocated by * BUS_ALLOC_RESOURCE(). At least part of the new address range must overlap * with the existing address range. If successful, the resource's range * will be adjusted to [start, end] on return. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _res the resource to adjust * @param _start the new starting address of the resource range * @param _end the new ending address of the resource range */ METHOD int adjust_resource { device_t _dev; device_t _child; struct resource *_res; rman_res_t _start; rman_res_t _end; }; /** * @brief Translate a resource value * * Give a bus driver the opportunity to translate resource ranges. If * successful, the host's view of the resource starting at @p _start is * returned in @p _newstart, otherwise an error is returned. * * @param _dev the device associated with the resource * @param _type the type of resource * @param _start the starting address of the resource range * @param _newstart the new starting address of the resource range */ METHOD int translate_resource { device_t _dev; int _type; rman_res_t _start; rman_res_t *_newstart; } DEFAULT bus_generic_translate_resource; /** * @brief Release a resource * * Free a resource allocated by BUS_ALLOC_RESOURCE(). The @p _rid * value must be the same as the one returned by BUS_ALLOC_RESOURCE() * (which is not necessarily the same as the one the client passed). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _rid the resource identifier * @param _res the resource to release */ METHOD int release_resource { device_t _dev; device_t _child; int _type; int _rid; struct resource *_res; }; /** * @brief Install an interrupt handler * * This method is used to associate an interrupt handler function with * an irq resource. When the interrupt triggers, the function @p _intr * will be called with the value of @p _arg as its single * argument. The value returned in @p *_cookiep is used to cancel the * interrupt handler - the caller should save this value to use in a * future call to BUS_TEARDOWN_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _flags a set of bits from enum intr_type specifying * the class of interrupt * @param _intr the function to call when the interrupt * triggers * @param _arg a value to use as the single argument in calls * to @p _intr * @param _cookiep a pointer to a location to receive a cookie * value that may be used to remove the interrupt * handler */ METHOD int setup_intr { device_t _dev; device_t _child; struct resource *_irq; int _flags; driver_filter_t *_filter; driver_intr_t *_intr; void *_arg; void **_cookiep; }; /** * @brief Uninstall an interrupt handler * * This method is used to disassociate an interrupt handler function * from an irq resource. The value of @p _cookie must be the value * returned from a previous call to BUS_SETUP_INTR().
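 *
 * Sketch of the usual pairing from a leaf driver (the softc fields
 * and the foo_intr handler are hypothetical):
 * @code
 * error = bus_setup_intr(dev, sc->irq_res,
 *     INTR_TYPE_MISC | INTR_MPSAFE, NULL, foo_intr, sc, &sc->cookie);
 * @endcode
 * and later, typically from the driver's detach routine:
 * @code
 * bus_teardown_intr(dev, sc->irq_res, sc->cookie);
 * @endcode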
* * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cookie the cookie value returned when the interrupt * was originally registered */ METHOD int teardown_intr { device_t _dev; device_t _child; struct resource *_irq; void *_cookie; }; /** * @brief Suspend an interrupt handler * * This method is used to mark a handler as suspended in the case * that the associated device is powered down and cannot be a source * for the (typically shared) interrupt. * The value of @p _irq must be the interrupt resource passed * to a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt */ METHOD int suspend_intr { device_t _dev; device_t _child; struct resource *_irq; } DEFAULT bus_generic_suspend_intr; /** * @brief Resume an interrupt handler * * This method is used to clear the suspended state of a handler when * the associated device is powered up and can be an interrupt source * again. * The value of @p _irq must be the interrupt resource passed * to a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt */ METHOD int resume_intr { device_t _dev; device_t _child; struct resource *_irq; } DEFAULT bus_generic_resume_intr; /** * @brief Define a resource which can be allocated with * BUS_ALLOC_RESOURCE(). * * This method is used by some buses (typically ISA) to allow a * driver to describe a resource range that it would like to * allocate. The resource defined by @p _type and @p _rid is defined * to start at @p _start and to include @p _count indices in its * range. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier * @param _start the start of the resource range * @param _count the size of the resource range */ METHOD int set_resource { device_t _dev; device_t _child; int _type; int _rid; rman_res_t _start; rman_res_t _count; }; /** * @brief Describe a resource * * This method allows a driver to examine the range used for a given * resource without actually allocating it. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier * @param _startp the address of a location to receive the start * index of the resource range * @param _countp the address of a location to receive the size * of the resource range */ METHOD int get_resource { device_t _dev; device_t _child; int _type; int _rid; rman_res_t *_startp; rman_res_t *_countp; }; /** * @brief Delete a resource. * * Use this to delete a resource (possibly one previously added with * BUS_SET_RESOURCE()). * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier */ METHOD void delete_resource { device_t _dev; device_t _child; int _type; int _rid; }; /** * @brief Return a struct resource_list. * * Used by drivers which use bus_generic_rl_alloc_resource() etc. to * implement their resource handling. It should return the resource * list of the given child device.
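 *
 * Sketch for a hypothetical bus that keeps the list in its per-child
 * ivars, which is the usual pattern:
 * @code
 * static struct resource_list *
 * foobus_get_resource_list(device_t dev, device_t child)
 * {
 *         struct foobus_devinfo *dinfo = device_get_ivars(child);
 *
 *         return (&dinfo->resources);
 * }
 * @endcode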
* * @param _dev the parent device of @p _child * @param _child the device which owns the resource list */ METHOD struct resource_list * get_resource_list { device_t _dev; device_t _child; } DEFAULT bus_generic_get_resource_list; /** * @brief Return a struct rman. * * Used by drivers which use bus_generic_rman_alloc_resource() etc. to * implement their resource handling. It should return the resource * manager used for the given resource type. * * @param _dev the bus device * @param _type the resource type * @param _flags resource flags (@c RF_XXX flags in * <sys/rman.h>) */ METHOD struct rman * get_rman { device_t _dev; int _type; u_int _flags; } DEFAULT null_get_rman; /** * @brief Is the hardware described by @p _child still attached to the * system? * * This method should return 0 if the device is not present. It * should return -1 if it is present. Any errors encountered while * making the determination should be returned as a normal errno value. * Client drivers are to * assume that the device is present, even if there is an error * determining if it is there. Buses are to try to avoid returning * errors, but newcard will return an error if the device fails to * implement this method. * * @param _dev the parent device of @p _child * @param _child the device which is being examined */ METHOD int child_present { device_t _dev; device_t _child; } DEFAULT bus_generic_child_present; /** * @brief Returns the pnp info for this device. * * Return it as a string, appended to @p _sb. * * The string must be formatted as a space-separated list of * name=value pairs. Names may only contain alphanumeric characters, * underscores ('_') and hyphens ('-'). Values can contain any * non-whitespace characters. Values containing whitespace can be * quoted with double quotes ('"'). Double quotes and backslashes in * quoted values can be escaped with backslashes ('\'). * * @param _dev the parent device of @p _child * @param _child the device which is being examined * @param _sb sbuf for results string */ METHOD int child_pnpinfo { device_t _dev; device_t _child; struct sbuf *_sb; } DEFAULT bus_generic_child_pnpinfo; /** * @brief Returns the location for this device. * * Return it as a string, appended to @p _sb. * * The string must be formatted as a space-separated list of * name=value pairs. Names may only contain alphanumeric characters, * underscores ('_') and hyphens ('-'). Values can contain any * non-whitespace characters. Values containing whitespace can be * quoted with double quotes ('"'). Double quotes and backslashes in * quoted values can be escaped with backslashes ('\'). * * @param _dev the parent device of @p _child * @param _child the device which is being examined * @param _sb sbuf for results string */ METHOD int child_location { device_t _dev; device_t _child; struct sbuf *_sb; } DEFAULT bus_generic_child_location; /** * @brief Allow drivers to request that an interrupt be bound to a specific * CPU. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cpu the CPU to bind the interrupt to */ METHOD int bind_intr { device_t _dev; device_t _child; struct resource *_irq; int _cpu; } DEFAULT bus_generic_bind_intr; /** * @brief Allow (bus) drivers to specify the trigger mode and polarity * of the specified interrupt.
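 *
 * For example, an interrupt controller's parent bus might be asked to
 * program a level-triggered, active-high interrupt like this (a
 * sketch; the enum values come from sys/bus.h):
 * @code
 * error = BUS_CONFIG_INTR(bus, irq,
 *     INTR_TRIGGER_LEVEL, INTR_POLARITY_HIGH);
 * @endcode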
* * @param _dev the bus device * @param _irq the interrupt number to modify * @param _trig the trigger mode required * @param _pol the interrupt polarity required */ METHOD int config_intr { device_t _dev; int _irq; enum intr_trigger _trig; enum intr_polarity _pol; } DEFAULT bus_generic_config_intr; /** * @brief Allow drivers to associate a description with an active * interrupt handler. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cookie the cookie value returned when the interrupt * was originally registered * @param _descr the description to associate with the interrupt */ METHOD int describe_intr { device_t _dev; device_t _child; struct resource *_irq; void *_cookie; const char *_descr; } DEFAULT bus_generic_describe_intr; /** * @brief Notify a (bus) driver about a child that the hints mechanism * believes it has discovered. * * The bus is responsible for then adding the child in the right order * and discovering other things about the child. The bus driver is * free to ignore this hint, to do special things, etc. It is all up * to the bus driver to interpret. * * This method is only called in response to the parent bus asking for * hinted devices to be enumerated. * * @param _dev the bus device * @param _dname the name of the device w/o unit numbers * @param _dunit the unit number of the device */ METHOD void hinted_child { device_t _dev; const char *_dname; int _dunit; }; /** * @brief Returns bus_dma_tag_t for use w/ devices on the bus. * * @param _dev the parent device of @p _child * @param _child the device to which the tag will belong */ METHOD bus_dma_tag_t get_dma_tag { device_t _dev; device_t _child; } DEFAULT bus_generic_get_dma_tag; /** * @brief Returns bus_space_tag_t for use w/ devices on the bus. * * @param _dev the parent device of @p _child * @param _child the device to which the tag will belong */ METHOD bus_space_tag_t get_bus_tag { device_t _dev; device_t _child; } DEFAULT bus_generic_get_bus_tag; /** * @brief Allow the bus to determine the unit number of a device. * * @param _dev the parent device of @p _child * @param _child the device whose unit is to be wired * @param _name the name of the device's new devclass * @param _unitp a pointer to the device's new unit value */ METHOD void hint_device_unit { device_t _dev; device_t _child; const char *_name; int *_unitp; }; /** * @brief Notify a bus that the bus pass level has been changed * * @param _dev the bus device */ METHOD void new_pass { device_t _dev; } DEFAULT bus_generic_new_pass; /** * @brief Notify a bus that the specified child's IRQ should be remapped. * * @param _dev the bus device * @param _child the child device * @param _irq the irq number */ METHOD int remap_intr { device_t _dev; device_t _child; u_int _irq; } DEFAULT null_remap_intr; /** * @brief Suspend a given child * * @param _dev the parent device of @p _child * @param _child the device to suspend */ METHOD int suspend_child { device_t _dev; device_t _child; } DEFAULT bus_generic_suspend_child; /** * @brief Resume a given child * * @param _dev the parent device of @p _child * @param _child the device to resume */ METHOD int resume_child { device_t _dev; device_t _child; } DEFAULT bus_generic_resume_child; /** * @brief Get the VM domain handle for the given bus and child.
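 *
 * Leaf drivers normally reach this through the bus_get_domain()
 * wrapper, e.g. to learn which NUMA domain a device belongs to (a
 * sketch):
 * @code
 * int domain;
 *
 * if (bus_get_domain(dev, &domain) == 0)
 *         device_printf(dev, "NUMA domain %d\n", domain);
 * @endcode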
* * @param _dev the bus device * @param _child the child device * @param _domain a pointer to the bus's domain handle identifier */ METHOD int get_domain { device_t _dev; device_t _child; int *_domain; } DEFAULT bus_generic_get_domain; /** * @brief Request a set of CPUs * * @param _dev the bus device * @param _child the child device * @param _op type of CPUs to request * @param _setsize the size of the set passed in _cpuset * @param _cpuset a pointer to a cpuset to receive the requested * set of CPUs */ METHOD int get_cpus { device_t _dev; device_t _child; enum cpu_sets _op; size_t _setsize; struct _cpuset *_cpuset; } DEFAULT bus_generic_get_cpus; /** * @brief Prepares the given child of the bus for reset * * Typically the bus detaches or suspends the children's drivers, and then * calls this method to save bus-specific information, for instance, * PCI config space, which is damaged by reset. * * The bus_helper_reset_prepare() helper is provided to ease * implementing bus reset methods. * * @param _dev the bus device * @param _child the child device */ METHOD int reset_prepare { device_t _dev; device_t _child; } DEFAULT null_reset_prepare; /** * @brief Restores the child operations after the reset * * The bus_helper_reset_post() helper is provided to ease * implementing bus reset methods. * * @param _dev the bus device * @param _child the child device */ METHOD int reset_post { device_t _dev; device_t _child; } DEFAULT null_reset_post; /** * @brief Performs reset of the child * * @param _dev the bus device * @param _child the child device * @param _flags DEVF_RESET_ flags */ METHOD int reset_child { device_t _dev; device_t _child; int _flags; }; /** * @brief Gets a child's specific property * * The bus_get_property() method can be used to access device-specific * properties stored on the bus. If @p _propvalue * is NULL or @p _size is 0, then the method only returns the size * of the property. * * @param _dev the bus device * @param _child the child device * @param _propname property name * @param _propvalue property value destination * @param _size property value size * * @returns the size of the property if successful, otherwise -1 */ METHOD ssize_t get_property { device_t _dev; device_t _child; const char *_propname; void *_propvalue; size_t _size; device_property_type_t type; } DEFAULT bus_generic_get_property; /** * @brief Gets a child's full path to the device * * The get_device_path method retrieves the device's * full path, using one of several * locators present in the system. * * @param _bus the bus device * @param _child the child device * @param _locator locator name * @param _sb sbuf to receive the location string */ METHOD int get_device_path { device_t _bus; device_t _child; const char *_locator; struct sbuf *_sb; } DEFAULT bus_generic_get_device_path; diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 34712ae511e5..450fc2742dad 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -1,6109 +1,6106 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_bus.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); static bool disable_failed_devs = false; SYSCTL_BOOL(_hw_bus, OID_AUTO, disable_failed_devices, CTLFLAG_RWTUN, &disable_failed_devs, 0, "Do not retry attaching devices that return an error from DEVICE_ATTACH the first time"); /* * Used to attach drivers to devclasses. */ typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ int pass; int flags; #define DL_DEFERRED_PROBE 1 /* Probe deferred on this */ TAILQ_ENTRY(driverlink) passlink; }; /* * Forward declarations */ typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; typedef TAILQ_HEAD(device_list, _device) device_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ int flags; #define DC_HAS_CHILDREN 1 struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; /** * @brief Implementation of _device. * * The structure is named "_device" instead of "device" to avoid type confusion * caused by other subsystems defining a (struct device). */ struct _device { /* * A device is a kernel object. The first field must be the * current ops table for the object. */ KOBJ_FIELDS; /* * Device hierarchy. */ TAILQ_ENTRY(_device) link; /**< list of devices in parent */ TAILQ_ENTRY(_device) devlink; /**< global device list membership */ device_t parent; /**< parent of this device */ device_list_t children; /**< list of child devices */ /* * Details of this device. */ driver_t *driver; /**< current driver */ devclass_t devclass; /**< current device class */ int unit; /**< current unit number */ char* nameunit; /**< name+unit e.g. 
foodev0 */ char* desc; /**< driver specific description */ u_int busy; /**< count of calls to device_busy() */ device_state_t state; /**< current device state */ uint32_t devflags; /**< api level flags for device_get_flags() */ u_int flags; /**< internal device flags */ u_int order; /**< order from device_add_child_ordered() */ void *ivars; /**< instance variables */ void *softc; /**< current driver's variables */ struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */ struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */ }; static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures"); static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc"); EVENTHANDLER_LIST_DEFINE(device_attach); EVENTHANDLER_LIST_DEFINE(device_detach); EVENTHANDLER_LIST_DEFINE(device_nomatch); EVENTHANDLER_LIST_DEFINE(dev_lookup); static void devctl2_init(void); static bool device_frozen; #define DRIVERNAME(d) ((d)? d->name : "no driver") #define DEVCLANAME(d) ((d)? d->name : "no devclass") #ifdef BUS_DEBUG static int bus_debug = 1; SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RWTUN, &bus_debug, 0, "Bus debug level"); #define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");} #define DEVICENAME(d) ((d)? device_get_name(d): "no device") /** * Produce the indenting, indent*2 spaces plus a '.' ahead of that to * prevent syslog from deleting initial spaces */ #define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJ<indent; iJ++) printf("  "); printf p ; } while (0) #else /* !BUS_DEBUG */ #define PDEBUG(a) #define DEVICENAME(d) #endif /* BUS_DEBUG */ enum { DEVCLASS_SYSCTL_PARENT, }; static int devclass_sysctl_handler(SYSCTL_HANDLER_ARGS) { devclass_t dc = (devclass_t)arg1; const char *value; switch (arg2) { case DEVCLASS_SYSCTL_PARENT: value = dc->parent ? dc->parent->name : ""; break; default: return (EINVAL); } return (SYSCTL_OUT_STR(req, value)); } static void devclass_sysctl_init(devclass_t dc) { if (dc->sysctl_tree != NULL) return; sysctl_ctx_init(&dc->sysctl_ctx); dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A", "parent class"); } enum { DEVICE_SYSCTL_DESC, DEVICE_SYSCTL_DRIVER, DEVICE_SYSCTL_LOCATION, DEVICE_SYSCTL_PNPINFO, DEVICE_SYSCTL_PARENT, }; static int device_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct sbuf sb; device_t dev = (device_t)arg1; int error; sbuf_new_for_sysctl(&sb, NULL, 1024, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); bus_topo_lock(); switch (arg2) { case DEVICE_SYSCTL_DESC: sbuf_cat(&sb, dev->desc ? dev->desc : ""); break; case DEVICE_SYSCTL_DRIVER: sbuf_cat(&sb, dev->driver ? dev->driver->name : ""); break; case DEVICE_SYSCTL_LOCATION: bus_child_location(dev, &sb); break; case DEVICE_SYSCTL_PNPINFO: bus_child_pnpinfo(dev, &sb); break; case DEVICE_SYSCTL_PARENT: sbuf_cat(&sb, dev->parent ?
dev->parent->nameunit : ""); break; default: error = EINVAL; goto out; } error = sbuf_finish(&sb); out: bus_topo_unlock(); sbuf_delete(&sb); return (error); } static void device_sysctl_init(device_t dev) { devclass_t dc = dev->devclass; int domain; if (dev->sysctl_tree != NULL) return; devclass_sysctl_init(dc); sysctl_ctx_init(&dev->sysctl_ctx); dev->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&dev->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, dev->nameunit + strlen(dc->name), CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "", "device_index"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%desc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A", "device description"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%driver", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A", "device driver name"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%location", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A", "device location relative to parent"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%pnpinfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A", "device identification"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A", "parent device"); if (bus_get_domain(dev, &domain) == 0) SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, domain, "NUMA domain"); } static void device_sysctl_update(device_t dev) { devclass_t dc = dev->devclass; if (dev->sysctl_tree == NULL) return; sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name)); } static void device_sysctl_fini(device_t dev) { if (dev->sysctl_tree == NULL) return; sysctl_ctx_free(&dev->sysctl_ctx); dev->sysctl_tree = NULL; } static struct device_list bus_data_devices; static int bus_data_generation = 1; static kobj_method_t null_methods[] = { KOBJMETHOD_END }; DEFINE_CLASS(null, null_methods, 0); void bus_topo_assert(void) { GIANT_REQUIRED; } struct mtx * bus_topo_mtx(void) { return (&Giant); } void bus_topo_lock(void) { mtx_lock(bus_topo_mtx()); } void bus_topo_unlock(void) { mtx_unlock(bus_topo_mtx()); } /* * Bus pass implementation */ static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes); int bus_current_pass = BUS_PASS_ROOT; /** * @internal * @brief Register the pass level of a new driver attachment * * Register a new driver attachment's pass level. If no driver * attachment with the same pass level has been added, then @p new * will be added to the global passes list. * * @param new the new driver attachment */ static void driver_register_pass(struct driverlink *new) { struct driverlink *dl; /* We only consider pass numbers during boot. */ if (bus_current_pass == BUS_PASS_DEFAULT) return; /* * Walk the passes list. If we already know about this pass * then there is nothing to do. If we don't, then insert this * driver link into the list. 
*/ TAILQ_FOREACH(dl, &passes, passlink) { if (dl->pass < new->pass) continue; if (dl->pass == new->pass) return; TAILQ_INSERT_BEFORE(dl, new, passlink); return; } TAILQ_INSERT_TAIL(&passes, new, passlink); } /** * @brief Raise the current bus pass * * Raise the current bus pass level to @p pass. Call the BUS_NEW_PASS() * method on the root bus to kick off a new device tree scan for each * new pass level that has at least one driver. */ void bus_set_pass(int pass) { struct driverlink *dl; if (bus_current_pass > pass) panic("Attempt to lower bus pass level"); TAILQ_FOREACH(dl, &passes, passlink) { /* Skip pass values below the current pass level. */ if (dl->pass <= bus_current_pass) continue; /* * Bail once we hit a driver with a pass level that is * too high. */ if (dl->pass > pass) break; /* * Raise the pass level to the next level and rescan * the tree. */ bus_current_pass = dl->pass; BUS_NEW_PASS(root_bus); } /* * If there isn't a driver registered for the requested pass, * then bus_current_pass might still be less than 'pass'. Set * it to 'pass' in that case. */ if (bus_current_pass < pass) bus_current_pass = pass; KASSERT(bus_current_pass == pass, ("Failed to update bus pass level")); } /* * Devclass implementation */ static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses); /** * @internal * @brief Find or create a device class * * If a device class with the name @p classname exists, return it, * otherwise if @p create is non-zero create and return a new device * class. * * If @p parentname is non-NULL, the parent of the devclass is set to * the devclass of that name. * * @param classname the devclass name to find or create * @param parentname the parent devclass name or @c NULL * @param create non-zero to create a devclass */ static devclass_t devclass_find_internal(const char *classname, const char *parentname, int create) { devclass_t dc; PDEBUG(("looking for %s", classname)); if (!classname) return (NULL); TAILQ_FOREACH(dc, &devclasses, link) { if (!strcmp(dc->name, classname)) break; } if (create && !dc) { PDEBUG(("creating %s", classname)); dc = malloc(sizeof(struct devclass) + strlen(classname) + 1, M_BUS, M_NOWAIT | M_ZERO); if (!dc) return (NULL); dc->parent = NULL; dc->name = (char*) (dc + 1); strcpy(dc->name, classname); TAILQ_INIT(&dc->drivers); TAILQ_INSERT_TAIL(&devclasses, dc, link); bus_data_generation_update(); } /* * If a parent class is specified, then set that as our parent so * that this devclass will support drivers for the parent class as * well. If the parent class has the same name don't do this though * as it creates a cycle that can trigger an infinite loop in * device_probe_child() if a device exists for which there is no * suitable driver. */ if (parentname && dc && !dc->parent && strcmp(classname, parentname) != 0) { dc->parent = devclass_find_internal(parentname, NULL, TRUE); dc->parent->flags |= DC_HAS_CHILDREN; } return (dc); } /** * @brief Create a device class * * If a device class with the name @p classname exists, return it, * otherwise create and return a new device class. * * @param classname the devclass name to find or create */ devclass_t devclass_create(const char *classname) { return (devclass_find_internal(classname, NULL, TRUE)); } /** * @brief Find a device class * * If a device class with the name @p classname exists, return it, * otherwise return @c NULL.
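 *
 * For example, to inspect existing units without creating the class
 * as a side effect (the "foo" class name is hypothetical):
 * @code
 * devclass_t dc;
 *
 * dc = devclass_find("foo");
 * if (dc != NULL)
 *         printf("foo devclass: %d unit slots\n",
 *             devclass_get_maxunit(dc));
 * @endcode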
* * @param classname the devclass name to find */ devclass_t devclass_find(const char *classname) { return (devclass_find_internal(classname, NULL, FALSE)); } /** * @brief Register that a device driver has been added to a devclass * * Register that a device driver has been added to a devclass. This * is called by devclass_add_driver to accomplish the recursive * notification of all the children classes of dc, as well as dc. * Each layer will have BUS_DRIVER_ADDED() called for all instances of * the devclass. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param dc the devclass to edit * @param driver the driver that was just added */ static void devclass_driver_added(devclass_t dc, driver_t *driver) { devclass_t parent; int i; /* * Call BUS_DRIVER_ADDED for any existing buses in this class. */ for (i = 0; i < dc->maxunit; i++) if (dc->devices[i] && device_is_attached(dc->devices[i])) BUS_DRIVER_ADDED(dc->devices[i], driver); /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(dc->flags & DC_HAS_CHILDREN)) return; parent = dc; TAILQ_FOREACH(dc, &devclasses, link) { if (dc->parent == parent) devclass_driver_added(dc, driver); } } static void device_handle_nomatch(device_t dev) { BUS_PROBE_NOMATCH(dev->parent, dev); EVENTHANDLER_DIRECT_INVOKE(device_nomatch, dev); dev->flags |= DF_DONENOMATCH; } /** * @brief Add a device driver to a device class * * Add a device driver to a devclass. This is normally called * automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of * all devices in the devclass will be called to allow them to attempt * to re-probe any unmatched children. * * @param dc the devclass to edit * @param driver the driver to register */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp) { driverlink_t dl; devclass_t child_dc; const char *parentname; PDEBUG(("%s", DRIVERNAME(driver))); /* Don't allow invalid pass values. */ if (pass <= BUS_PASS_ROOT) return (EINVAL); dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO); if (!dl) return (ENOMEM); /* * Compile the driver's methods. Also increase the reference count * so that the class doesn't get freed when the last instance * goes. This means we can safely use static methods and avoids a * double-free in devclass_delete_driver. */ kobj_class_compile((kobj_class_t) driver); /* * If the driver has any base classes, make the * devclass inherit from the devclass of the driver's * first base class. This will allow the system to * search for drivers in both devclasses for children * of a device using this driver. 
*/ if (driver->baseclasses) parentname = driver->baseclasses[0]->name; else parentname = NULL; child_dc = devclass_find_internal(driver->name, parentname, TRUE); if (dcp != NULL) *dcp = child_dc; dl->driver = driver; TAILQ_INSERT_TAIL(&dc->drivers, dl, link); driver->refs++; /* XXX: kobj_mtx */ dl->pass = pass; driver_register_pass(dl); if (device_frozen) { dl->flags |= DL_DEFERRED_PROBE; } else { devclass_driver_added(dc, driver); } bus_data_generation_update(); return (0); } /** * @brief Register that a device driver has been deleted from a devclass * * Register that a device driver has been removed from a devclass. * This is called by devclass_delete_driver to accomplish the * recursive notification of all the children classes of busclass, as * well as busclass. Each layer will attempt to detach the driver * from any devices that are children of the bus's devclass. The function * will return an error if a device fails to detach. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param busclass the devclass of the parent bus * @param dc the devclass of the driver being deleted * @param driver the driver being deleted */ static int devclass_driver_deleted(devclass_t busclass, devclass_t dc, driver_t *driver) { devclass_t parent; device_t dev; int error, i; /* * Disassociate from any devices. We iterate through all the * devices in the devclass of the driver and detach any which are * using the driver and which have a parent in the devclass which * we are deleting from. * * Note that since a driver can be in multiple devclasses, we * should not detach devices which are not children of devices in * the affected devclass. * * If we're frozen, we don't generate NOMATCH events. Mark to * generate later. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_detach(dev)) != 0) return (error); if (device_frozen) { dev->flags &= ~DF_DONENOMATCH; dev->flags |= DF_NEEDNOMATCH; } else { device_handle_nomatch(dev); } } } } /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(busclass->flags & DC_HAS_CHILDREN)) return (0); parent = busclass; TAILQ_FOREACH(busclass, &devclasses, link) { if (busclass->parent == parent) { error = devclass_driver_deleted(busclass, dc, driver); if (error) return (error); } } return (0); } /** * @brief Delete a device driver from a device class * * Delete a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_delete_driver() will first attempt to detach from each * device. If one of the detach calls fails, the driver will not be * deleted. * * @param dc the devclass to edit * @param driver the driver to unregister */ int devclass_delete_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. 
*/ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } error = devclass_driver_deleted(busclass, dc, driver); if (error != 0) return (error); TAILQ_REMOVE(&busclass->drivers, dl, link); free(dl, M_BUS); /* XXX: kobj_mtx */ driver->refs--; if (driver->refs == 0) kobj_class_free((kobj_class_t) driver); bus_data_generation_update(); return (0); } /** * @brief Quiesces a set of device drivers from a device class * * Quiesce a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_quiesce_driver() will first attempt to quiesce each * device. * * @param dc the devclass to edit * @param driver the driver to quiesce */ static int devclass_quiesce_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; device_t dev; int i; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } /* * Quiesce all devices. We iterate through all the devices in * the devclass of the driver and quiesce any which are using * the driver and which have a parent in the devclass which we * are quiescing. * * Note that since a driver can be in multiple devclasses, we * should not quiesce devices which are not children of * devices in the affected devclass. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_quiesce(dev)) != 0) return (error); } } } return (0); } /** * @internal */ static driverlink_t devclass_find_driver_internal(devclass_t dc, const char *classname) { driverlink_t dl; PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc))); TAILQ_FOREACH(dl, &dc->drivers, link) { if (!strcmp(dl->driver->name, classname)) return (dl); } PDEBUG(("not found")); return (NULL); } /** * @brief Return the name of the devclass */ const char * devclass_get_name(devclass_t dc) { return (dc->name); } /** * @brief Find a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t devclass_get_device(devclass_t dc, int unit) { if (dc == NULL || unit < 0 || unit >= dc->maxunit) return (NULL); return (dc->devices[unit]); } /** * @brief Find the softc field of a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the softc field of the device with the given * unit number or @c NULL if there is no such * device */ void * devclass_get_softc(devclass_t dc, int unit) { device_t dev; dev = devclass_get_device(dc, unit); if (!dev) return (NULL); return (device_get_softc(dev)); } /** * @brief Get a list of devices in the devclass * * An array containing a list of all the devices in the given devclass * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP), even if @p *devcountp is 0.
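 *
 * Typical usage, including the mandatory free (a sketch):
 * @code
 * device_t *devs;
 * int ndevs, i;
 *
 * if (devclass_get_devices(dc, &devs, &ndevs) == 0) {
 *         for (i = 0; i < ndevs; i++)
 *                 device_printf(devs[i], "member of devclass\n");
 *         free(devs, M_TEMP);
 * }
 * @endcode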
* * @param dc the devclass to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp) { int count, i; device_t *list; count = devclass_get_count(dc); list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { list[count] = dc->devices[i]; count++; } } *devlistp = list; *devcountp = count; return (0); } /** * @brief Get a list of drivers in the devclass * * An array containing a list of pointers to all the drivers in the * given devclass is allocated and returned in @p *listp. The number * of drivers in the array is returned in @p *countp. The caller should * free the array using @c free(p, M_TEMP). * * @param dc the devclass to examine * @param listp gives location for array pointer return value * @param countp gives location for number of array elements * return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp) { driverlink_t dl; driver_t **list; int count; count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) count++; list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT); if (list == NULL) return (ENOMEM); count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) { list[count] = dl->driver; count++; } *listp = list; *countp = count; return (0); } /** * @brief Get the number of devices in a devclass * * @param dc the devclass to examine */ int devclass_get_count(devclass_t dc) { int count, i; count = 0; for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) count++; return (count); } /** * @brief Get the maximum unit number used in a devclass * * Note that this is one greater than the highest currently-allocated * unit. If a null devclass_t is passed in, -1 is returned to indicate * that not even the devclass has been allocated yet. * * @param dc the devclass to examine */ int devclass_get_maxunit(devclass_t dc) { if (dc == NULL) return (-1); return (dc->maxunit); } /** * @brief Find a free unit number in a devclass * * This function searches for the first unused unit number greater * than or equal to @p unit. * * @param dc the devclass to examine * @param unit the first unit number to check */ int devclass_find_free_unit(devclass_t dc, int unit) { if (dc == NULL) return (unit); while (unit < dc->maxunit && dc->devices[unit] != NULL) unit++; return (unit); } /** * @brief Set the parent of a devclass * * The parent class is normally initialised automatically by * DRIVER_MODULE(). * * @param dc the devclass to edit * @param pdc the new parent devclass */ void devclass_set_parent(devclass_t dc, devclass_t pdc) { dc->parent = pdc; } /** * @brief Get the parent of a devclass * * @param dc the devclass to examine */ devclass_t devclass_get_parent(devclass_t dc) { return (dc->parent); } struct sysctl_ctx_list * devclass_get_sysctl_ctx(devclass_t dc) { return (&dc->sysctl_ctx); } struct sysctl_oid * devclass_get_sysctl_tree(devclass_t dc) { return (dc->sysctl_tree); } /** * @internal * @brief Allocate a unit number * * On entry, @p *unitp is the desired unit number (or @c -1 if any * will do). The allocated unit number is returned in @p *unitp.
* @param dc the devclass to allocate from * @param unitp points at the location for the allocated unit * number * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp) { const char *s; int unit = *unitp; PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc))); /* Ask the parent bus if it wants to wire this device. */ if (unit == -1) BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name, &unit); /* If we were given a wired unit number, check for existing device */ /* XXX imp XXX */ if (unit != -1) { if (unit >= 0 && unit < dc->maxunit && dc->devices[unit] != NULL) { if (bootverbose) printf("%s: %s%d already exists; skipping it\n", dc->name, dc->name, *unitp); return (EEXIST); } } else { /* Unwired device, find the next available slot for it */ unit = 0; for (unit = 0;; unit++) { /* If this device slot is already in use, skip it. */ if (unit < dc->maxunit && dc->devices[unit] != NULL) continue; /* If there is an "at" hint for a unit then skip it. */ if (resource_string_value(dc->name, unit, "at", &s) == 0) continue; break; } } /* * We've selected a unit beyond the length of the table, so let's * extend the table to make room for all units up to and including * this one. */ if (unit >= dc->maxunit) { device_t *newlist, *oldlist; int newsize; oldlist = dc->devices; newsize = roundup((unit + 1), MAX(1, MINALLOCSIZE / sizeof(device_t))); newlist = malloc(sizeof(device_t) * newsize, M_BUS, M_NOWAIT); if (!newlist) return (ENOMEM); if (oldlist != NULL) bcopy(oldlist, newlist, sizeof(device_t) * dc->maxunit); bzero(newlist + dc->maxunit, sizeof(device_t) * (newsize - dc->maxunit)); dc->devices = newlist; dc->maxunit = newsize; if (oldlist != NULL) free(oldlist, M_BUS); } PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc))); *unitp = unit; return (0); } /** * @internal * @brief Add a device to a devclass * * A unit number is allocated for the device (using the device's * preferred unit number if any) and the device is registered in the * devclass. This allows the device to be looked up by its unit * number, e.g. by decoding a dev_t minor number. * * @param dc the devclass to add to * @param dev the device to add * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_add_device(devclass_t dc, device_t dev) { int buflen, error; PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX); if (buflen < 0) return (ENOMEM); dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO); if (!dev->nameunit) return (ENOMEM); if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) { free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (error); } dc->devices[dev->unit] = dev; dev->devclass = dc; snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit); return (0); } /** * @internal * @brief Delete a device from a devclass * * The device is removed from the devclass's device list and its unit * number is freed. 
* @param dc the devclass to delete from * @param dev the device to delete * * @retval 0 success */ static int devclass_delete_device(devclass_t dc, device_t dev) { if (!dc || !dev) return (0); PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); if (dev->devclass != dc || dc->devices[dev->unit] != dev) panic("devclass_delete_device: inconsistent device class"); dc->devices[dev->unit] = NULL; if (dev->flags & DF_WILDCARD) dev->unit = -1; dev->devclass = NULL; free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (0); } /** * @internal * @brief Make a new device and add it as a child of @p parent * * @param parent the parent of the new device * @param name the devclass name of the new device or @c NULL * to leave the devclass unspecified * @param unit the unit number of the new device or @c -1 to * leave the unit number unspecified * * @returns the new device */ static device_t make_device(device_t parent, const char *name, int unit) { device_t dev; devclass_t dc; PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit)); if (name) { dc = devclass_find_internal(name, NULL, TRUE); if (!dc) { printf("make_device: can't find device class %s\n", name); return (NULL); } } else { dc = NULL; } dev = malloc(sizeof(*dev), M_BUS, M_NOWAIT|M_ZERO); if (!dev) return (NULL); dev->parent = parent; TAILQ_INIT(&dev->children); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; dev->devclass = NULL; dev->unit = unit; dev->nameunit = NULL; dev->desc = NULL; dev->busy = 0; dev->devflags = 0; dev->flags = DF_ENABLED; dev->order = 0; if (unit == -1) dev->flags |= DF_WILDCARD; if (name) { dev->flags |= DF_FIXEDCLASS; if (devclass_add_device(dc, dev)) { kobj_delete((kobj_t) dev, M_BUS); return (NULL); } } if (parent != NULL && device_has_quiet_children(parent)) dev->flags |= DF_QUIET | DF_QUIET_CHILDREN; dev->ivars = NULL; dev->softc = NULL; dev->state = DS_NOTPRESENT; TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink); bus_data_generation_update(); return (dev); } /** * @internal * @brief Print a description of a device. */ static int device_print_child(device_t dev, device_t child) { int retval = 0; if (device_is_alive(child)) retval += BUS_PRINT_CHILD(dev, child); else retval += device_printf(child, " not found\n"); return (retval); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with order zero. * * @param dev the device which will be the parent of the * new child device * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child(device_t dev, const char *name, int unit) { return (device_add_child_ordered(dev, 0, name, unit)); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with the same order.
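 *
 * For example, a bus identify routine might add an early and a late
 * child (the names and order values here are hypothetical):
 * @code
 * device_add_child_ordered(dev, 0, "fooearly", -1);
 * device_add_child_ordered(dev, 100, "foolate", -1);
 * @endcode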
* * @param dev the device which will be the parent of the * new child device * @param order a value which is used to partially sort the * children of @p dev - devices created using * lower values of @p order appear first in @p * dev's list of children * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit) { device_t child; device_t place; PDEBUG(("%s at %s with order %u as unit %d", name, DEVICENAME(dev), order, unit)); KASSERT(name != NULL || unit == -1, ("child device with wildcard name and specific unit number")); child = make_device(dev, name, unit); if (child == NULL) return (child); child->order = order; TAILQ_FOREACH(place, &dev->children, link) { if (place->order > order) break; } if (place) { /* * The device 'place' is the first device whose order is * greater than the new child. */ TAILQ_INSERT_BEFORE(place, child, link); } else { /* * The new child's order is greater or equal to the order of * any existing device. Add the child to the tail of the list. */ TAILQ_INSERT_TAIL(&dev->children, child, link); } bus_data_generation_update(); return (child); } /** * @brief Delete a device * * This function deletes a device along with all of its children. If * the device currently has a driver attached to it, the device is * detached first using device_detach(). * * @param dev the parent device * @param child the device to delete * * @retval 0 success * @retval non-zero a unit error code describing the error */ int device_delete_child(device_t dev, device_t child) { int error; device_t grandchild; PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev))); /* detach parent before deleting children, if any */ if ((error = device_detach(child)) != 0) return (error); /* remove children second */ while ((grandchild = TAILQ_FIRST(&child->children)) != NULL) { error = device_delete_child(child, grandchild); if (error) return (error); } if (child->devclass) devclass_delete_device(child->devclass, child); if (child->parent) BUS_CHILD_DELETED(dev, child); TAILQ_REMOVE(&dev->children, child, link); TAILQ_REMOVE(&bus_data_devices, child, devlink); kobj_delete((kobj_t) child, M_BUS); bus_data_generation_update(); return (0); } /** * @brief Delete all children devices of the given device, if any. * * This function deletes all children devices of the given device, if * any, using the device_delete_child() function for each device it * finds. If a child device cannot be deleted, this function will * return an error code. * * @param dev the parent device * * @retval 0 success * @retval non-zero a device would not detach */ int device_delete_children(device_t dev) { device_t child; int error; PDEBUG(("Deleting all children of %s", DEVICENAME(dev))); error = 0; while ((child = TAILQ_FIRST(&dev->children)) != NULL) { error = device_delete_child(dev, child); if (error) { PDEBUG(("Failed deleting %s", DEVICENAME(child))); break; } } return (error); } /** * @brief Find a device given a unit number * * This is similar to devclass_get_devices() but only searches for * devices which have @p dev as a parent. * * @param dev the parent device to search * @param unit the unit number to search for. If the unit is -1, * return the first child of @p dev which has name * @p classname (that is, the one with the lowest unit.) 
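 *
 * For example, to locate an existing wildcard-named child under a
 * given parent (the "foo" class name is hypothetical):
 * @code
 * device_t child;
 *
 * child = device_find_child(dev, "foo", -1);
 * if (child == NULL)
 *         return (ENXIO);
 * @endcode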
* * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t device_find_child(device_t dev, const char *classname, int unit) { devclass_t dc; device_t child; dc = devclass_find(classname); if (!dc) return (NULL); if (unit != -1) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } else { for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } } return (NULL); } /** * @internal */ static driverlink_t first_matching_driver(devclass_t dc, device_t dev) { if (dev->devclass) return (devclass_find_driver_internal(dc, dev->devclass->name)); return (TAILQ_FIRST(&dc->drivers)); } /** * @internal */ static driverlink_t next_matching_driver(devclass_t dc, device_t dev, driverlink_t last) { if (dev->devclass) { driverlink_t dl; for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link)) if (!strcmp(dev->devclass->name, dl->driver->name)) return (dl); return (NULL); } return (TAILQ_NEXT(last, link)); } /** * @internal */ int device_probe_child(device_t dev, device_t child) { devclass_t dc; driverlink_t best = NULL; driverlink_t dl; int result, pri = 0; /* We should preserve the devclass (or lack of) set by the bus. */ int hasclass = (child->devclass != NULL); bus_topo_assert(); dc = dev->devclass; if (!dc) panic("device_probe_child: parent device has no devclass"); /* * If the state is already probed, then return. */ if (child->state == DS_ALIVE) return (0); for (; dc; dc = dc->parent) { for (dl = first_matching_driver(dc, child); dl; dl = next_matching_driver(dc, child, dl)) { /* If this driver's pass is too high, then ignore it. */ if (dl->pass > bus_current_pass) continue; PDEBUG(("Trying %s", DRIVERNAME(dl->driver))); result = device_set_driver(child, dl->driver); if (result == ENOMEM) return (result); else if (result != 0) continue; if (!hasclass) { if (device_set_devclass(child, dl->driver->name) != 0) { char const * devname = device_get_name(child); if (devname == NULL) devname = "(unknown)"; printf("driver bug: Unable to set " "devclass (class: %s " "devname: %s)\n", dl->driver->name, devname); (void)device_set_driver(child, NULL); continue; } } /* Fetch any flags for the device before probing. */ resource_int_value(dl->driver->name, child->unit, "flags", &child->devflags); result = DEVICE_PROBE(child); /* * If the driver returns SUCCESS, there can be * no higher match for this device. */ if (result == 0) { best = dl; pri = 0; break; } /* Reset flags and devclass before the next probe. */ child->devflags = 0; if (!hasclass) (void)device_set_devclass(child, NULL); /* * Reset DF_QUIET in case this driver doesn't * end up as the best driver. */ device_verbose(child); /* * Probes that return BUS_PROBE_NOWILDCARD or lower * only match on devices whose driver was explicitly * specified. */ if (result <= BUS_PROBE_NOWILDCARD && !(child->flags & DF_FIXEDCLASS)) { result = ENXIO; } /* * The driver returned an error so it * certainly doesn't match. */ if (result > 0) { (void)device_set_driver(child, NULL); continue; } /* * A priority lower than SUCCESS, remember the * best matching driver. Initialise the value * of pri for the first match. */ if (best == NULL || result > pri) { best = dl; pri = result; continue; } } /* * If we have an unambiguous match in this devclass, * don't look in the parent. 
*/ if (best && pri == 0) break; } if (best == NULL) return (ENXIO); /* * If we found a driver, change state and initialise the devclass. */ if (pri < 0) { /* Set the winning driver, devclass, and flags. */ result = device_set_driver(child, best->driver); if (result != 0) return (result); if (!child->devclass) { result = device_set_devclass(child, best->driver->name); if (result != 0) { (void)device_set_driver(child, NULL); return (result); } } resource_int_value(best->driver->name, child->unit, "flags", &child->devflags); /* * A bit bogus. Call the probe method again to make sure * that we have the right description. */ result = DEVICE_PROBE(child); if (result > 0) { if (!hasclass) (void)device_set_devclass(child, NULL); (void)device_set_driver(child, NULL); return (result); } } child->state = DS_ALIVE; bus_data_generation_update(); return (0); } /** * @brief Return the parent of a device */ device_t device_get_parent(device_t dev) { return (dev->parent); } /** * @brief Get a list of children of a device * * An array containing a list of all the children of the given device * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP). * * @param dev the device to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int device_get_children(device_t dev, device_t **devlistp, int *devcountp) { int count; device_t child; device_t *list; count = 0; TAILQ_FOREACH(child, &dev->children, link) { count++; } if (count == 0) { *devlistp = NULL; *devcountp = 0; return (0); } list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; TAILQ_FOREACH(child, &dev->children, link) { list[count] = child; count++; } *devlistp = list; *devcountp = count; return (0); } /** * @brief Return the current driver for the device or @c NULL if there * is no driver currently attached */ driver_t * device_get_driver(device_t dev) { return (dev->driver); } /** * @brief Return the current devclass for the device or @c NULL if * there is none. */ devclass_t device_get_devclass(device_t dev) { return (dev->devclass); } /** * @brief Return the name of the device's devclass or @c NULL if there * is none. */ const char * device_get_name(device_t dev) { if (dev != NULL && dev->devclass) return (devclass_get_name(dev->devclass)); return (NULL); } /** * @brief Return a string containing the device's devclass name * followed by an ascii representation of the device's unit number * (e.g. @c "foo2"). */ const char * device_get_nameunit(device_t dev) { return (dev->nameunit); } /** * @brief Return the device's unit number. 
*/ int device_get_unit(device_t dev) { return (dev->unit); } /** * @brief Return the device's description string */ const char * device_get_desc(device_t dev) { return (dev->desc); } /** * @brief Return the device's flags */ uint32_t device_get_flags(device_t dev) { return (dev->devflags); } struct sysctl_ctx_list * device_get_sysctl_ctx(device_t dev) { return (&dev->sysctl_ctx); } struct sysctl_oid * device_get_sysctl_tree(device_t dev) { return (dev->sysctl_tree); } /** * @brief Print the name of the device followed by a colon and a space * * @returns the number of characters printed */ int device_print_prettyname(device_t dev) { const char *name = device_get_name(dev); if (name == NULL) return (printf("unknown: ")); return (printf("%s%d: ", name, device_get_unit(dev))); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling vprintf() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_printf(device_t dev, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); sbuf_set_drain(&sb, sbuf_printf_drain, &retval); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); sbuf_delete(&sb); return (retval); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling log() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_log(device_t dev, int pri, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); log(pri, "%.*s", (int) sbuf_len(&sb), sbuf_data(&sb)); retval = sbuf_len(&sb); sbuf_delete(&sb); return (retval); } /** * @internal */ static void device_set_desc_internal(device_t dev, const char *desc, bool allocated) { if (dev->desc && (dev->flags & DF_DESCMALLOCED)) { free(dev->desc, M_BUS); dev->flags &= ~DF_DESCMALLOCED; dev->desc = NULL; } if (allocated && desc) dev->flags |= DF_DESCMALLOCED; dev->desc = __DECONST(char *, desc); bus_data_generation_update(); } /** * @brief Set the device's description * * The value of @c desc should be a string constant that will not * change (at least until the description is changed in a subsequent * call to device_set_desc() or device_set_desc_copy()). */ void device_set_desc(device_t dev, const char *desc) { device_set_desc_internal(dev, desc, false); } /** * @brief Set the device's description * * A printf-like version of device_set_desc(). */ void device_set_descf(device_t dev, const char *fmt, ...) { va_list ap; char *buf = NULL; va_start(ap, fmt); vasprintf(&buf, M_BUS, fmt, ap); va_end(ap); device_set_desc_internal(dev, buf, true); } /** * @brief Set the device's description * * The string pointed to by @c desc is copied. Use this function if * the device description is generated, (e.g. with sprintf()). 
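 *
 * A minimal sketch (buf and rev are illustrative locals):
 *
 *	char buf[32];
 *
 *	snprintf(buf, sizeof(buf), "Foo controller rev %d", rev);
 *	device_set_desc_copy(dev, buf);
 *
 * The stack buffer goes out of scope when the caller returns, so the
 * copying variant (or device_set_descf()) must be used here rather
 * than device_set_desc().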
*/ void device_set_desc_copy(device_t dev, const char *desc) { char *buf; buf = strdup_flags(desc, M_BUS, M_NOWAIT); device_set_desc_internal(dev, buf, true); } /** * @brief Set the device's flags */ void device_set_flags(device_t dev, uint32_t flags) { dev->devflags = flags; } /** * @brief Return the device's softc field * * The softc is allocated and zeroed when a driver is attached, based * on the size field of the driver. */ void * device_get_softc(device_t dev) { return (dev->softc); } /** * @brief Set the device's softc field * * Most drivers do not need to use this since the softc is allocated * automatically when the driver is attached. */ void device_set_softc(device_t dev, void *softc) { if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) free(dev->softc, M_BUS_SC); dev->softc = softc; if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Free claimed softc * * Most drivers do not need to use this since the softc is freed * automatically when the driver is detached. */ void device_free_softc(void *softc) { free(softc, M_BUS_SC); } /** * @brief Claim softc * * This function can be used to let the driver free the automatically * allocated softc using "device_free_softc()". This function is * useful when the driver is refcounting the softc and the softc * cannot be freed when the "device_detach" method is called. */ void device_claim_softc(device_t dev) { if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Get the device's ivars field * * The ivars field is used by the parent device to store per-device * state (e.g. the physical location of the device or a list of * resources). */ void * device_get_ivars(device_t dev) { KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)")); return (dev->ivars); } /** * @brief Set the device's ivars field */ void device_set_ivars(device_t dev, void * ivars) { KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)")); dev->ivars = ivars; } /** * @brief Return the device's state */ device_state_t device_get_state(device_t dev) { return (dev->state); } /** * @brief Set the DF_ENABLED flag for the device */ void device_enable(device_t dev) { dev->flags |= DF_ENABLED; } /** * @brief Clear the DF_ENABLED flag for the device */ void device_disable(device_t dev) { dev->flags &= ~DF_ENABLED; } /** * @brief Increment the busy counter for the device */ void device_busy(device_t dev) { /* * Mark the device as busy, recursively up the tree if this busy count * goes 0->1. */ if (refcount_acquire(&dev->busy) == 0 && dev->parent != NULL) device_busy(dev->parent); } /** * @brief Decrement the busy counter for the device */ void device_unbusy(device_t dev) { /* * Mark the device as unbusy, recursively if this is the last busy count. */ if (refcount_release(&dev->busy) && dev->parent != NULL) device_unbusy(dev->parent); } /** * @brief Set the DF_QUIET flag for the device */ void device_quiet(device_t dev) { dev->flags |= DF_QUIET; } /** * @brief Set the DF_QUIET_CHILDREN flag for the device */ void device_quiet_children(device_t dev) { dev->flags |= DF_QUIET_CHILDREN; } /** * @brief Clear the DF_QUIET flag for the device */ void device_verbose(device_t dev) { dev->flags &= ~DF_QUIET; } ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type) { device_t bus = device_get_parent(dev); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_HANDLE: /* Size checks done in implementation.
*/ break; case DEVICE_PROP_UINT32: if (sz % 4 != 0) return (-1); break; case DEVICE_PROP_UINT64: if (sz % 8 != 0) return (-1); break; default: return (-1); } return (BUS_GET_PROPERTY(bus, dev, prop, val, sz, type)); } bool device_has_property(device_t dev, const char *prop) { return (device_get_property(dev, prop, NULL, 0, DEVICE_PROP_ANY) >= 0); } /** * @brief Return non-zero if the DF_QUIET_CHILDREN flag is set on the device */ int device_has_quiet_children(device_t dev) { return ((dev->flags & DF_QUIET_CHILDREN) != 0); } /** * @brief Return non-zero if the DF_QUIET flag is set on the device */ int device_is_quiet(device_t dev) { return ((dev->flags & DF_QUIET) != 0); } /** * @brief Return non-zero if the DF_ENABLED flag is set on the device */ int device_is_enabled(device_t dev) { return ((dev->flags & DF_ENABLED) != 0); } /** * @brief Return non-zero if the device was successfully probed */ int device_is_alive(device_t dev) { return (dev->state >= DS_ALIVE); } /** * @brief Return non-zero if the device currently has a driver * attached to it */ int device_is_attached(device_t dev) { return (dev->state >= DS_ATTACHED); } /** * @brief Return non-zero if the device is currently suspended. */ int device_is_suspended(device_t dev) { return ((dev->flags & DF_SUSPENDED) != 0); } /** * @brief Set the devclass of a device * @see devclass_add_device(). */ int device_set_devclass(device_t dev, const char *classname) { devclass_t dc; int error; if (!classname) { if (dev->devclass) devclass_delete_device(dev->devclass, dev); return (0); } if (dev->devclass) { printf("device_set_devclass: device class already set\n"); return (EINVAL); } dc = devclass_find_internal(classname, NULL, TRUE); if (!dc) return (ENOMEM); error = devclass_add_device(dc, dev); bus_data_generation_update(); return (error); } /** * @brief Set the devclass of a device and mark the devclass fixed. * @see device_set_devclass() */ int device_set_devclass_fixed(device_t dev, const char *classname) { int error; if (classname == NULL) return (EINVAL); error = device_set_devclass(dev, classname); if (error) return (error); dev->flags |= DF_FIXEDCLASS; return (0); } /** * @brief Query the device to determine if it's of a fixed devclass * @see device_set_devclass_fixed() */ bool device_is_devclass_fixed(device_t dev) { return ((dev->flags & DF_FIXEDCLASS) != 0); } /** * @brief Set the driver of a device * * @retval 0 success * @retval EBUSY the device already has a driver attached * @retval ENOMEM a memory allocation failure occurred */ int device_set_driver(device_t dev, driver_t *driver) { int domain; struct domainset *policy; if (dev->state >= DS_ATTACHED) return (EBUSY); if (dev->driver == driver) return (0); if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) { free(dev->softc, M_BUS_SC); dev->softc = NULL; } device_set_desc(dev, NULL); kobj_delete((kobj_t) dev, NULL); dev->driver = driver; if (driver) { kobj_init((kobj_t) dev, (kobj_class_t) driver); if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) { if (bus_get_domain(dev, &domain) == 0) policy = DOMAINSET_PREF(domain); else policy = DOMAINSET_RR(); dev->softc = malloc_domainset(driver->size, M_BUS_SC, policy, M_NOWAIT | M_ZERO); if (!dev->softc) { kobj_delete((kobj_t) dev, NULL); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; return (ENOMEM); } } } else { kobj_init((kobj_t) dev, &null_class); } bus_data_generation_update(); return (0); } /** * @brief Probe a device, and return its status. * * This function is the core of the device autoconfiguration * system.
Its purpose is to select a suitable driver for a device and * then call that driver to initialise the hardware appropriately. The * driver is selected by calling the DEVICE_PROBE() method of a set of * candidate drivers and then choosing the driver which returned the * best value. This driver is then attached to the device using * device_attach(). * * The set of suitable drivers is taken from the list of drivers in * the parent device's devclass. If the device was originally created * with a specific class name (see device_add_child()), only drivers * with that name are probed, otherwise all drivers in the devclass * are probed. If no drivers return successful probe values in the * parent devclass, the search continues in the parent of that * devclass (see devclass_get_parent()) if any. * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code * @retval -1 Device already attached */ int device_probe(device_t dev) { int error; bus_topo_assert(); if (dev->state >= DS_ALIVE) return (-1); if (!(dev->flags & DF_ENABLED)) { if (bootverbose && device_get_name(dev) != NULL) { device_print_prettyname(dev); printf("not probed (disabled)\n"); } return (-1); } if ((error = device_probe_child(dev->parent, dev)) != 0) { if (bus_current_pass == BUS_PASS_DEFAULT && !(dev->flags & DF_DONENOMATCH)) { device_handle_nomatch(dev); } return (error); } return (0); } /** * @brief Probe a device and attach a driver if possible * * calls device_probe() and attaches if that was successful. */ int device_probe_and_attach(device_t dev) { int error; bus_topo_assert(); error = device_probe(dev); if (error == -1) return (0); else if (error != 0) return (error); CURVNET_SET_QUIET(vnet0); error = device_attach(dev); CURVNET_RESTORE(); return error; } /** * @brief Attach a device driver to a device * * This function is a wrapper around the DEVICE_ATTACH() driver * method. In addition to calling DEVICE_ATTACH(), it initialises the * device's sysctl tree, optionally prints a description of the device * and queues a notification event for user-based device management * services. * * Normally this function is only called internally from * device_probe_and_attach(). * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_attach(device_t dev) { uint64_t attachtime; uint16_t attachentropy; int error; if (resource_disabled(dev->driver->name, dev->unit)) { device_disable(dev); if (bootverbose) device_printf(dev, "disabled via hints entry\n"); return (ENXIO); } device_sysctl_init(dev); if (!device_is_quiet(dev)) device_print_child(dev->parent, dev); attachtime = get_cyclecount(); dev->state = DS_ATTACHING; if ((error = DEVICE_ATTACH(dev)) != 0) { printf("device_attach: %s%d attach returned %d\n", dev->driver->name, dev->unit, error); if (disable_failed_devs) { /* * When the user has asked to disable failed devices, we * directly disable the device, but leave it in the * attaching state. It will not try to probe/attach the * device further. This leaves the device numbering * intact for other similar devices in the system. It * can be removed from this state with devctl. */ device_disable(dev); } else { /* * Otherwise, when attach fails, tear down the state * around that so we can retry when, for example, new * drivers are loaded. 
*/ if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); KASSERT(dev->busy == 0, ("attach failed but busy")); dev->state = DS_NOTPRESENT; } return (error); } dev->flags |= DF_ATTACHED_ONCE; /* * We only need the low bits of this time, but ranges from tens to thousands * have been seen, so keep 2 bytes' worth. */ attachentropy = (uint16_t)(get_cyclecount() - attachtime); random_harvest_direct(&attachentropy, sizeof(attachentropy), RANDOM_ATTACH); device_sysctl_update(dev); dev->state = DS_ATTACHED; dev->flags &= ~DF_DONENOMATCH; EVENTHANDLER_DIRECT_INVOKE(device_attach, dev); return (0); } /** * @brief Detach a driver from a device * * This function is a wrapper around the DEVICE_DETACH() driver * method. If the call to DEVICE_DETACH() succeeds, it calls * BUS_CHILD_DETACHED() for the parent of @p dev, queues a * notification event for user-based device management services and * cleans up the device's sysctl tree. * * @param dev the device to un-initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_detach(device_t dev) { int error; bus_topo_assert(); PDEBUG(("%s", DEVICENAME(dev))); if (dev->busy > 0) return (EBUSY); if (dev->state == DS_ATTACHING) { device_printf(dev, "device in attaching state! Deferring detach.\n"); return (EBUSY); } if (dev->state != DS_ATTACHED) return (0); EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_BEGIN); if ((error = DEVICE_DETACH(dev)) != 0) { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_FAILED); return (error); } else { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_COMPLETE); } if (!device_is_quiet(dev)) device_printf(dev, "detached\n"); if (dev->parent) BUS_CHILD_DETACHED(dev->parent, dev); if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); device_verbose(dev); dev->state = DS_NOTPRESENT; (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); return (0); } /** * @brief Tells a driver to quiesce itself. * * This function is a wrapper around the DEVICE_QUIESCE() driver * method, which asks the driver to cease operation of the device in * preparation for a possible detach. * * @param dev the device to quiesce * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_quiesce(device_t dev) { PDEBUG(("%s", DEVICENAME(dev))); if (dev->busy > 0) return (EBUSY); if (dev->state != DS_ATTACHED) return (0); return (DEVICE_QUIESCE(dev)); } /** * @brief Notify a device of system shutdown * * This function calls the DEVICE_SHUTDOWN() driver method if the * device currently has an attached driver. * * @returns the value returned by DEVICE_SHUTDOWN() */ int device_shutdown(device_t dev) { if (dev->state < DS_ATTACHED) return (0); return (DEVICE_SHUTDOWN(dev)); } /** * @brief Set the unit number of a device * * This function can be used to override the unit number used for a * device (e.g. to wire a device to a pre-configured unit number).
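 *
 * A hypothetical caller wiring a child to unit 2 might do:
 *
 *	if (device_set_unit(child, 2) != 0)
 *		device_printf(child, "cannot wire to unit 2\n");
 *
 * The call fails with EBUSY if another device in the devclass already
 * owns the requested unit.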
*/ int device_set_unit(device_t dev, int unit) { devclass_t dc; int err; if (unit == dev->unit) return (0); dc = device_get_devclass(dev); if (unit < dc->maxunit && dc->devices[unit]) return (EBUSY); err = devclass_delete_device(dc, dev); if (err) return (err); dev->unit = unit; err = devclass_add_device(dc, dev); if (err) return (err); bus_data_generation_update(); return (0); } /*======================================*/ /* * Some useful method implementations to make life easier for bus drivers. */ /** * @brief Initialize a resource mapping request * * This is the internal implementation of the public API * resource_init_map_request. Callers may be using a different layout * of struct resource_map_request than the kernel, so callers pass in * the size of the structure they are using to identify the structure * layout. */ void resource_init_map_request_impl(struct resource_map_request *args, size_t sz) { bzero(args, sz); args->size = sz; args->memattr = VM_MEMATTR_DEVICE; } /** * @brief Validate a resource mapping request * * Translate a device driver's mapping request (@p in) to a struct * resource_map_request using the current structure layout (@p out). * In addition, validate the offset and length from the mapping * request against the bounds of the resource @p r. If the offset or * length are invalid, fail with EINVAL. If the offset and length are * valid, the absolute starting address of the requested mapping is * returned in @p startp and the length of the requested mapping is * returned in @p lengthp. */ int resource_validate_map_request(struct resource *r, struct resource_map_request *in, struct resource_map_request *out, rman_res_t *startp, rman_res_t *lengthp) { rman_res_t end, length, start; /* * This assumes that any callers of this function are compiled * into the kernel and use the same version of the structure * as this file. */ MPASS(out->size == sizeof(struct resource_map_request)); if (in != NULL) bcopy(in, out, imin(in->size, out->size)); start = rman_get_start(r) + out->offset; if (out->length == 0) length = rman_get_size(r); else length = out->length; end = start + length - 1; if (start > rman_get_end(r) || start < rman_get_start(r)) return (EINVAL); if (end > rman_get_end(r) || end < start) return (EINVAL); *lengthp = length; *startp = start; return (0); } /** * @brief Initialise a resource list. * * @param rl the resource list to initialise */ void resource_list_init(struct resource_list *rl) { STAILQ_INIT(rl); } /** * @brief Reclaim memory used by a resource list. * * This function frees the memory for all resource entries on the list * (if any). * * @param rl the resource list to free */ void resource_list_free(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) panic("resource_list_free: resource entry is busy"); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } /** * @brief Add a resource entry. * * This function adds a resource entry using the given @p type, @p * start, @p end and @p count values. A rid value is chosen by * searching sequentially for the first unused rid starting at zero. * * @param rl the resource list to edit * @param type the resource entry type (e.g. 
SYS_RES_MEMORY) * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count) { int rid; rid = 0; while (resource_list_find(rl, type, rid) != NULL) rid++; resource_list_add(rl, type, rid, start, end, count); return (rid); } /** * @brief Add or modify a resource entry. * * If an existing entry exists with the same type and rid, it will be * modified using the given values of @p start, @p end and @p * count. If no entry exists, a new one will be created using the * given values. The resource list entry that matches is then returned. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) { rle = malloc(sizeof(struct resource_list_entry), M_BUS, M_NOWAIT); if (!rle) panic("resource_list_add: can't record entry"); STAILQ_INSERT_TAIL(rl, rle, link); rle->type = type; rle->rid = rid; rle->res = NULL; rle->flags = 0; } if (rle->res) panic("resource_list_add: resource entry is busy"); rle->start = start; rle->end = end; rle->count = count; return (rle); } /** * @brief Determine if a resource entry is busy. * * Returns true if a resource entry is busy meaning that it has an * associated resource that is not an unallocated "reserved" resource. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is busy, zero otherwise. */ int resource_list_busy(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL || rle->res == NULL) return (0); if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) { KASSERT(!(rman_get_flags(rle->res) & RF_ACTIVE), ("reserved resource is active")); return (0); } return (1); } /** * @brief Determine if a resource entry is reserved. * * Returns true if a resource entry is reserved meaning that it has an * associated "reserved" resource. The resource can either be * allocated or unallocated. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is reserved, zero otherwise. */ int resource_list_reserved(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle != NULL && rle->flags & RLE_RESERVED) return (1); return (0); } /** * @brief Find a resource entry by type and rid. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns the resource entry pointer or NULL if there is no such * entry. */ struct resource_list_entry * resource_list_find(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type && rle->rid == rid) return (rle); } return (NULL); } /** * @brief Delete a resource entry. 
* * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier */ void resource_list_delete(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle = resource_list_find(rl, type, rid); if (rle) { if (rle->res != NULL) panic("resource_list_delete: resource has not been released"); STAILQ_REMOVE(rl, rle, resource_list_entry, link); free(rle, M_BUS); } } /** * @brief Allocate a reserved resource * * This can be used by buses to force the allocation of resources * that are always active in the system even if they are not allocated * by a driver (e.g. PCI BARs). This function is usually called when * adding a new child to the bus. The resource is allocated from the * parent bus when it is reserved. The resource list entry is marked * with RLE_RESERVED to note that it is a reserved resource. * * Subsequent attempts to allocate the resource with * resource_list_alloc() will succeed the first time and will set * RLE_ALLOCATED to note that it has been allocated. When a reserved * resource that has been allocated is released with * resource_list_release() the resource RLE_ALLOCATED is cleared, but * the actual resource remains allocated. The resource can be released to * the parent bus by calling resource_list_unreserve(). * * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device for which the resource is being reserved * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * <sys/rman.h> for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); struct resource *r; if (passthrough) panic( "resource_list_reserve() should only be called for direct children"); if (flags & RF_ACTIVE) panic( "resource_list_reserve() should only reserve inactive resources"); r = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); if (r != NULL) { rle = resource_list_find(rl, type, *rid); rle->flags |= RLE_RESERVED; } return (r); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE() * * Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list * and passing the allocation up to the parent of @p bus. This assumes * that the first entry of @c device_get_ivars(child) is a struct * resource_list. This also handles 'passthrough' allocations where a * child is a remote descendant of bus by passing the allocation up to * the parent of bus. * * Typically, a bus driver would store a list of child resources * somewhere in the child device's ivars (see device_get_ivars()) and * its implementation of BUS_ALLOC_RESOURCE() would find that list and * then call resource_list_alloc() to perform the allocation.
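 *
 * A minimal sketch of such a method (the mybus driver and its
 * mybus_ivars structure, which embeds a struct resource_list as its
 * first member, are hypothetical):
 *
 *	static struct resource *
 *	mybus_alloc_resource(device_t bus, device_t child, int type,
 *	    int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
 *	    u_int flags)
 *	{
 *		struct mybus_ivars *ivars = device_get_ivars(child);
 *
 *		return (resource_list_alloc(&ivars->rl, bus, child, type,
 *		    rid, start, end, count, flags));
 *	}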
* * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device which is requesting an allocation * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * <sys/rman.h> for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags)); } rle = resource_list_find(rl, type, *rid); if (!rle) return (NULL); /* no resource of that type/rid */ if (rle->res) { if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) return (NULL); if ((flags & RF_ACTIVE) && bus_activate_resource(child, type, *rid, rle->res) != 0) return (NULL); rle->flags |= RLE_ALLOCATED; return (rle->res); } device_printf(bus, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (isdefault) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); /* * Record the new range. */ if (rle->res) { rle->start = rman_get_start(rle->res); rle->end = rman_get_end(rle->res); rle->count = count; } return (rle->res); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE() * * Implement BUS_RELEASE_RESOURCE() using a resource list. Normally * used with resource_list_alloc().
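 *
 * A matching BUS_RELEASE_RESOURCE() method for the hypothetical mybus
 * sketch shown above might be:
 *
 *	static int
 *	mybus_release_resource(device_t bus, device_t child, int type,
 *	    int rid, struct resource *r)
 *	{
 *		struct mybus_ivars *ivars = device_get_ivars(child);
 *
 *		return (resource_list_release(&ivars->rl, bus, child,
 *		    type, rid, r));
 *	}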
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device which is requesting a release * @param type the type of resource to release * @param rid the resource identifier * @param res the resource to release * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_release(struct resource_list *rl, device_t bus, device_t child, int type, int rid, struct resource *res) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int error; if (passthrough) { return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res)); } rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_release: can't find resource"); if (!rle->res) panic("resource_list_release: resource entry is not busy"); if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) { if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error) return (error); } rle->flags &= ~RLE_ALLOCATED; return (0); } return (EINVAL); } error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res); if (error) return (error); rle->res = NULL; return (0); } /** * @brief Release all active resources of a given type * * Release all active resources of a specified type. This is intended * to be used to cleanup resources leaked by a driver after detach or * a failed attach. * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose active resources are being released * @param type the type of resources to release * * @retval 0 success * @retval EBUSY at least one resource was active */ int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type) { struct resource_list_entry *rle; int error, retval; retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) continue; retval = EBUSY; error = resource_list_release(rl, bus, child, type, rman_get_rid(rle->res), rle->res); if (error != 0) device_printf(bus, "Failed to release active resource: %d\n", error); } return (retval); } /** * @brief Fully release a reserved resource * * Fully releases a resource reserved via resource_list_reserve(). 
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose reserved resource is being released * @param type the type of resource to release * @param rid the resource identifier * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); if (passthrough) panic( "resource_list_unreserve() should only be called for direct children"); rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_unreserve: can't find resource"); if (!(rle->flags & RLE_RESERVED)) return (EINVAL); if (rle->flags & RLE_ALLOCATED) return (EBUSY); rle->flags &= ~RLE_RESERVED; return (resource_list_release(rl, bus, child, type, rid, rle->res)); } /** * @brief Print a description of resources in a resource list * * Print all resources of a specified type, for use in BUS_PRINT_CHILD(). * The name is printed if at least one resource of the given type is available. * The format is used to print resource start and end. * * @param rl the resource list to print * @param name the name of @p type, e.g. @c "memory" * @param type the type of resource entry to print * @param format printf(9) format string to print resource * start and end values * * @returns the number of characters printed */ int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format) { struct resource_list_entry *rle; int printed, retval; printed = 0; retval = 0; /* Yes, this is kinda cheating */ STAILQ_FOREACH(rle, rl, link) { if (rle->type == type) { if (printed == 0) retval += printf(" %s ", name); else retval += printf(","); printed++; retval += printf(format, rle->start); if (rle->count > 1) { retval += printf("-"); retval += printf(format, rle->start + rle->count - 1); } } } return (retval); } /** * @brief Releases all the resources in a list. * * @param rl The resource list to purge. * * @returns nothing */ void resource_list_purge(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) bus_release_resource(rman_get_device(rle->res), rle->type, rle->rid, rle->res); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit) { return (device_add_child_ordered(dev, order, name, unit)); } /** * @brief Helper function for implementing DEVICE_PROBE() * * This function can be used to help implement the DEVICE_PROBE() for * a bus (i.e. a device which has other devices attached to it). It * calls the DEVICE_IDENTIFY() method of each driver in the device's * devclass. */ int bus_generic_probe(device_t dev) { devclass_t dc = dev->devclass; driverlink_t dl; TAILQ_FOREACH(dl, &dc->drivers, link) { /* * If this driver's pass is too high, then ignore it. * For most drivers in the default pass, this will * never be true. For early-pass drivers they will * only call the identify routines of eligible drivers * when this routine is called. Drivers for later * passes should have their identify routines called * on early-pass buses during BUS_NEW_PASS().
*/ if (dl->pass > bus_current_pass) continue; DEVICE_IDENTIFY(dl->driver, dev); } return (0); } /** * @brief Helper function for implementing DEVICE_ATTACH() * * This function can be used to help implement the DEVICE_ATTACH() for * a bus. It calls device_probe_and_attach() for each of the device's * children. */ int bus_generic_attach(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { device_probe_and_attach(child); } return (0); } /** * @brief Helper function for delaying attaching children * * Many buses can't run transactions on the bus which children need to probe and * attach until after interrupts and/or timers are running. This function * delays their attach until interrupts and timers are enabled. */ int bus_delayed_attach_children(device_t dev) { /* Probe and attach the bus children when interrupts are available */ config_intrhook_oneshot((ich_func_t)bus_generic_attach, dev); return (0); } /** * @brief Helper function for implementing DEVICE_DETACH() * * This function can be used to help implement the DEVICE_DETACH() for * a bus. It calls device_detach() for each of the device's * children. */ int bus_generic_detach(device_t dev) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); /* * Detach children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((error = device_detach(child)) != 0) return (error); } return (0); } /** * @brief Helper function for implementing DEVICE_SHUTDOWN() * * This function can be used to help implement the DEVICE_SHUTDOWN() * for a bus. It calls device_shutdown() for each of the device's * children. */ int bus_generic_shutdown(device_t dev) { device_t child; /* * Shut down children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { device_shutdown(child); } return (0); } /** * @brief Default function for suspending a child device. * * This function is to be used by a bus's DEVICE_SUSPEND_CHILD(). */ int bus_generic_suspend_child(device_t dev, device_t child) { int error; error = DEVICE_SUSPEND(child); if (error == 0) { child->flags |= DF_SUSPENDED; } else { printf("DEVICE_SUSPEND(%s) failed: %d\n", device_get_nameunit(child), error); } return (error); } /** * @brief Default function for resuming a child device. * * This function is to be used by a bus's DEVICE_RESUME_CHILD(). */ int bus_generic_resume_child(device_t dev, device_t child) { DEVICE_RESUME(child); child->flags &= ~DF_SUSPENDED; return (0); } /** * @brief Helper function for implementing DEVICE_SUSPEND() * * This function can be used to help implement the DEVICE_SUSPEND() * for a bus. It calls DEVICE_SUSPEND() for each of the device's * children. If any call to DEVICE_SUSPEND() fails, the suspend * operation is aborted and any devices which were suspended are * resumed immediately by calling their DEVICE_RESUME() methods. */ int bus_generic_suspend(device_t dev) { int error; device_t child; /* * Suspend children in the reverse order. * For most buses all children are equal, so the order does not matter. * Other buses, such as acpi, carefully order their child devices to * express implicit dependencies between them. For such buses it is * safer to bring down devices in the reverse order. 
TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { error = BUS_SUSPEND_CHILD(dev, child); if (error != 0) { child = TAILQ_NEXT(child, link); if (child != NULL) { TAILQ_FOREACH_FROM(child, &dev->children, link) BUS_RESUME_CHILD(dev, child); } return (error); } } return (0); } /** * @brief Helper function for implementing DEVICE_RESUME() * * This function can be used to help implement the DEVICE_RESUME() for * a bus. It calls DEVICE_RESUME() on each of the device's children. */ int bus_generic_resume(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESUME_CHILD(dev, child); /* if resume fails, there's nothing we can usefully do... */ } return (0); } /** * @brief Helper function for implementing BUS_RESET_POST * * A bus can use this function to implement common operations of * re-attaching or resuming the children after the bus itself was * reset, and after restoring bus-unique state of children. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_post(device_t dev, int flags) { device_t child; int error, error1; error = 0; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESET_POST(dev, child); error1 = (flags & DEVF_RESET_DETACH) != 0 ? device_probe_and_attach(child) : BUS_RESUME_CHILD(dev, child); if (error == 0 && error1 != 0) error = error1; } return (error); } static void bus_helper_reset_prepare_rollback(device_t dev, device_t child, int flags) { child = TAILQ_NEXT(child, link); if (child == NULL) return; TAILQ_FOREACH_FROM(child, &dev->children, link) { BUS_RESET_POST(dev, child); if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } /** * @brief Helper function for implementing BUS_RESET_PREPARE * * A bus can use this function to implement common operations of * detaching or suspending the children before the bus itself is * reset, and then save bus-unique state of children that must * persist across the reset. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_prepare(device_t dev, int flags) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ? device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { error = BUS_RESET_PREPARE(dev, child); if (error != 0) { if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } if (error != 0) { bus_helper_reset_prepare_rollback(dev, child, flags); return (error); } } return (0); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the first part of the ascii representation of * @p child, including its name, unit and description (if any - see * device_set_desc()). * * @returns the number of characters printed */ int bus_print_child_header(device_t dev, device_t child) { int retval = 0; if (device_get_desc(child)) { retval += device_printf(child, "<%s>", device_get_desc(child)); } else { retval += printf("%s", device_get_nameunit(child)); } return (retval); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the last part of the ascii representation of * @p child, which consists of the string @c " on " followed by the * name and unit of the @p dev.
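 *
 * Together with bus_print_child_header() this produces the familiar
 * boot-time output (device names illustrative):
 *
 *	foo0: <Foo Controller> on mybus0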
* * @returns the number of characters printed */ int bus_print_child_footer(device_t dev, device_t child) { return (printf(" on %s\n", device_get_nameunit(dev))); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints out the VM domain for the given device. * * @returns the number of characters printed */ int bus_print_child_domain(device_t dev, device_t child) { int domain; /* No domain? Don't print anything */ if (BUS_GET_DOMAIN(dev, child, &domain) != 0) return (0); return (printf(" numa-domain %d", domain)); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function simply calls bus_print_child_header() followed by * bus_print_child_footer(). * * @returns the number of characters printed */ int bus_generic_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } /** * @brief Stub function for implementing BUS_READ_IVAR(). * * @returns ENOENT */ int bus_generic_read_ivar(device_t dev, device_t child, int index, uintptr_t * result) { return (ENOENT); } /** * @brief Stub function for implementing BUS_WRITE_IVAR(). * * @returns ENOENT */ int bus_generic_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * @brief Helper function for implementing BUS_GET_PROPERTY(). * * This simply calls the BUS_GET_PROPERTY of the parent of dev, * until a non-default implementation is found. */ ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { if (device_get_parent(dev) != NULL) return (BUS_GET_PROPERTY(device_get_parent(dev), child, propname, propvalue, size, type)); return (-1); } /** * @brief Stub function for implementing BUS_GET_RESOURCE_LIST(). * * @returns NULL */ struct resource_list * bus_generic_get_resource_list(device_t dev, device_t child) { return (NULL); } /** * @brief Helper function for implementing BUS_DRIVER_ADDED(). * * This implementation of BUS_DRIVER_ADDED() simply calls the driver's * DEVICE_IDENTIFY() method to allow it to add new children to the bus * and then calls device_probe_and_attach() for each unattached child. */ void bus_generic_driver_added(device_t dev, driver_t *driver) { device_t child; DEVICE_IDENTIFY(driver, dev); TAILQ_FOREACH(child, &dev->children, link) { if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } } /** * @brief Helper function for implementing BUS_NEW_PASS(). * * This implementation of BUS_NEW_PASS() first calls the identify * routines for any drivers that probe at the current pass. Then it * walks the list of devices for this bus. If a device is already * attached, then it calls BUS_NEW_PASS() on that device. If the * device is not already attached, it attempts to attach a driver to * it. */ void bus_generic_new_pass(device_t dev) { driverlink_t dl; devclass_t dc; device_t child; dc = dev->devclass; TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->pass == bus_current_pass) DEVICE_IDENTIFY(dl->driver, dev); } TAILQ_FOREACH(child, &dev->children, link) { if (child->state >= DS_ATTACHED) BUS_NEW_PASS(child); else if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } } /** * @brief Helper function for implementing BUS_SETUP_INTR(). * * This simple implementation of BUS_SETUP_INTR() simply calls the * BUS_SETUP_INTR() method of the parent of @p dev.
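 *
 * A bus driver that does not manage interrupt resources itself
 * typically just lists this helper in its method table, e.g.:
 *
 *	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),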
*/ int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SETUP_INTR(dev->parent, child, irq, flags, filter, intr, arg, cookiep)); return (EINVAL); } /** * @brief Helper function for implementing BUS_TEARDOWN_INTR(). * * This simple implementation of BUS_TEARDOWN_INTR() simply calls the * BUS_TEARDOWN_INTR() method of the parent of @p dev. */ int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie)); return (EINVAL); } /** * @brief Helper function for implementing BUS_SUSPEND_INTR(). * * This simple implementation of BUS_SUSPEND_INTR() simply calls the * BUS_SUSPEND_INTR() method of the parent of @p dev. */ int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SUSPEND_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_RESUME_INTR(). * * This simple implementation of BUS_RESUME_INTR() simply calls the * BUS_RESUME_INTR() method of the parent of @p dev. */ int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RESUME_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This simple implementation of BUS_ADJUST_RESOURCE() simply calls the * BUS_ADJUST_RESOURCE() method of the parent of @p dev. */ int bus_generic_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ADJUST_RESOURCE(dev->parent, child, r, start, end)); return (EINVAL); } /* * @brief Helper function for implementing BUS_TRANSLATE_RESOURCE(). * * This simple implementation of BUS_TRANSLATE_RESOURCE() simply calls the * BUS_TRANSLATE_RESOURCE() method of the parent of @p dev. If there is no * parent, no translation happens. */ int bus_generic_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart) { if (dev->parent) return (BUS_TRANSLATE_RESOURCE(dev->parent, type, start, newstart)); *newstart = start; return (0); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This simple implementation of BUS_ALLOC_RESOURCE() simply calls the * BUS_ALLOC_RESOURCE() method of the parent of @p dev. */ struct resource * bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid, start, end, count, flags)); return (NULL); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This simple implementation of BUS_RELEASE_RESOURCE() simply calls the * BUS_RELEASE_RESOURCE() method of the parent of @p dev. */ int bus_generic_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. 
*/ if (dev->parent) return (BUS_RELEASE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the * BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. */ int -bus_generic_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +bus_generic_activate_resource(device_t dev, device_t child, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) - return (BUS_ACTIVATE_RESOURCE(dev->parent, child, type, rid, - r)); + return (BUS_ACTIVATE_RESOURCE(dev->parent, child, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the * BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev. */ int -bus_generic_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +bus_generic_deactivate_resource(device_t dev, device_t child, + struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) - return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, type, rid, - r)); + return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_MAP_RESOURCE(). * * This simple implementation of BUS_MAP_RESOURCE() simply calls the * BUS_MAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *args, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_MAP_RESOURCE(dev->parent, child, r, args, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_UNMAP_RESOURCE(). * * This simple implementation of BUS_UNMAP_RESOURCE() simply calls the * BUS_UNMAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_UNMAP_RESOURCE(dev->parent, child, r, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_BIND_INTR(). * * This simple implementation of BUS_BIND_INTR() simply calls the * BUS_BIND_INTR() method of the parent of @p dev. */ int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_BIND_INTR(dev->parent, child, irq, cpu)); return (EINVAL); } /** * @brief Helper function for implementing BUS_CONFIG_INTR(). * * This simple implementation of BUS_CONFIG_INTR() simply calls the * BUS_CONFIG_INTR() method of the parent of @p dev. */ int bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DESCRIBE_INTR(). * * This simple implementation of BUS_DESCRIBE_INTR() simply calls the * BUS_DESCRIBE_INTR() method of the parent of @p dev. 
*/ int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie, descr)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_CPUS(). * * This simple implementation of BUS_GET_CPUS() simply calls the * BUS_GET_CPUS() method of the parent of @p dev. */ int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_CPUS(dev->parent, child, op, setsize, cpuset)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_DMA_TAG(). * * This simple implementation of BUS_GET_DMA_TAG() simply calls the * BUS_GET_DMA_TAG() method of the parent of @p dev. */ bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_DMA_TAG(dev->parent, child)); return (NULL); } /** * @brief Helper function for implementing BUS_GET_BUS_TAG(). * * This simple implementation of BUS_GET_BUS_TAG() simply calls the * BUS_GET_BUS_TAG() method of the parent of @p dev. */ bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_BUS_TAG(dev->parent, child)); return ((bus_space_tag_t)0); } /** * @brief Helper function for implementing BUS_GET_RESOURCE(). * * This implementation of BUS_GET_RESOURCE() uses the * resource_list_find() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * search. */ int bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list * rl = NULL; struct resource_list_entry * rle = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); rle = resource_list_find(rl, type, rid); if (!rle) return (ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } /** * @brief Helper function for implementing BUS_SET_RESOURCE(). * * This implementation of BUS_SET_RESOURCE() uses the * resource_list_add() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ int bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); resource_list_add(rl, type, rid, start, (start + count - 1), count); return (0); } /** * @brief Helper function for implementing BUS_DELETE_RESOURCE(). * * This implementation of BUS_DELETE_RESOURCE() uses the * resource_list_delete() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ void bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return; resource_list_delete(rl, type, rid); return; } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() uses the * resource_list_release() function to do most of the work. 
It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ int bus_generic_rl_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); return (resource_list_release(rl, dev, child, type, rid, r)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() uses the * resource_list_alloc() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ struct resource * bus_generic_rl_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (NULL); return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() allocates a * resource from a resource manager. It uses BUS_GET_RMAN() * to obtain the resource manager. */ struct resource * bus_generic_rman_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *r; struct rman *rm; rm = BUS_GET_RMAN(dev, type, flags); if (rm == NULL) return (NULL); r = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (r == NULL) return (NULL); rman_set_rid(r, *rid); rman_set_type(r, type); if (flags & RF_ACTIVE) { - if (bus_activate_resource(child, type, *rid, r) != 0) { + if (bus_activate_resource(child, r) != 0) { rman_release_resource(r); return (NULL); } } return (r); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This implementation of BUS_ADJUST_RESOURCE() adjusts resources only * if they were allocated from the resource manager returned by * BUS_GET_RMAN(). */ int bus_generic_rman_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = BUS_GET_RMAN(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (ENXIO); if (!rman_is_region_manager(r, rm)) return (EINVAL); return (rman_adjust_resource(r, start, end)); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() releases resources * allocated by bus_generic_rman_alloc_resource. */ int bus_generic_rman_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { #ifdef INVARIANTS struct rman *rm; #endif int error; #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif if (rman_get_flags(r) & RF_ACTIVE) { - error = bus_deactivate_resource(child, type, rid, r); + error = bus_deactivate_resource(child, r); if (error != 0) return (error); } return (rman_release_resource(r)); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This implementation of BUS_ACTIVATE_RESOURCE() activates resources * allocated by bus_generic_rman_alloc_resource.
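 */

/*
 * A minimal sketch, not part of this change: the bus_generic_rman_*
 * helpers above expect the bus to expose its resource managers through
 * BUS_GET_RMAN(), usually keyed on the resource type.  The "mybus"
 * names and softc layout are hypothetical.
 */
struct mybus_softc {
	struct rman	sc_mem_rman;	/* hypothetical memory rman */
	struct rman	sc_irq_rman;	/* hypothetical IRQ rman */
};

static struct rman *
mybus_get_rman(device_t dev, int type, u_int flags)
{
	struct mybus_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_MEMORY:
		return (&sc->sc_mem_rman);
	case SYS_RES_IRQ:
		return (&sc->sc_irq_rman);
	default:
		return (NULL);
	}
}

/*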
*/ int -bus_generic_rman_activate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +bus_generic_rman_activate_resource(device_t dev, device_t child, + struct resource *r) { struct resource_map map; #ifdef INVARIANTS struct rman *rm; #endif - int error; + int error, type; + type = rman_get_type(r); #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif error = rman_activate_resource(r); if (error != 0) return (error); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { error = BUS_MAP_RESOURCE(dev, child, r, NULL, &map); if (error != 0) { rman_deactivate_resource(r); return (error); } rman_set_mapping(r, &map); } return (0); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This implementation of BUS_DEACTIVATE_RESOURCE() deactivates * resources allocated by bus_generic_rman_alloc_resource. */ int -bus_generic_rman_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +bus_generic_rman_deactivate_resource(device_t dev, device_t child, + struct resource *r) { struct resource_map map; #ifdef INVARIANTS struct rman *rm; #endif - int error; + int error, type; + type = rman_get_type(r); #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif error = rman_deactivate_resource(r); if (error != 0) return (error); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { rman_get_mapping(r, &map); BUS_UNMAP_RESOURCE(dev, child, r, &map); } return (0); } /** * @brief Helper function for implementing BUS_CHILD_PRESENT(). * * This simple implementation of BUS_CHILD_PRESENT() simply calls the * BUS_CHILD_PRESENT() method of the parent of @p dev. */ int bus_generic_child_present(device_t dev, device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(dev), dev)); } /** * @brief Helper function for implementing BUS_GET_DOMAIN(). * * This simple implementation of BUS_GET_DOMAIN() calls the * BUS_GET_DOMAIN() method of the parent of @p dev. If @p dev * does not have a parent, the function fails with ENOENT. */ int bus_generic_get_domain(device_t dev, device_t child, int *domain) { if (dev->parent) return (BUS_GET_DOMAIN(dev->parent, dev, domain)); return (ENOENT); } /** * @brief Helper function to implement normal BUS_GET_DEVICE_PATH() * * This function knows how to (a) pass the request up the tree if there's * a parent and (b) supply a FreeBSD locator. * * @param bus bus in the walk up the tree * @param child leaf node to print information about * @param locator BUS_LOCATOR_xxx string for locator * @param sb Buffer to print information into */ int bus_generic_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { int rv = 0; device_t parent; /* * We don't recurse on ACPI since either we know the handle for the * device or we don't. And if we're in the generic routine, we don't * have an ACPI override. All other locators build up a path by having * their parents create a path and then adding the path element for this * node. That's why we recurse with parent, bus rather than the typical * parent, child: each spot in the tree is independent of what our child * will do with this path.
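 *
 * (Illustrative aside: for the FreeBSD locator each level appends "/"
 * plus its nameunit, so a finished path looks like "/pcib0/pci0/em0";
 * the device names here are hypothetical.)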
*/ parent = device_get_parent(bus); if (parent != NULL && strcmp(locator, BUS_LOCATOR_ACPI) != 0) { rv = BUS_GET_DEVICE_PATH(parent, bus, locator, sb); } if (strcmp(locator, BUS_LOCATOR_FREEBSD) == 0) { if (rv == 0) { sbuf_printf(sb, "/%s", device_get_nameunit(child)); } return (rv); } /* * Don't know what to do. So assume we do nothing. Not sure that's * the right thing, but keeps us from having a big list here. */ return (0); } /** * @brief Helper function for implementing BUS_RESCAN(). * * This null implementation of BUS_RESCAN() always fails to indicate * the bus does not support rescanning. */ int bus_null_rescan(device_t dev) { return (ENODEV); } /* * Some convenience functions to make it easier for drivers to use the * resource-management functions. All these really do is hide the * indirection through the parent's method table, making for slightly * less-wordy code. In the future, it might make sense for this code * to maintain some sort of a list of resources allocated by each device. */ int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) res[i] = NULL; for (i = 0; rs[i].type != -1; i++) { res[i] = bus_alloc_resource_any(dev, rs[i].type, &rs[i].rid, rs[i].flags); if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) { bus_release_resources(dev, rs, res); return (ENXIO); } } return (0); } void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) if (res[i] != NULL) { bus_release_resource( dev, rs[i].type, rs[i].rid, res[i]); res[i] = NULL; } } /** * @brief Wrapper function for BUS_ALLOC_RESOURCE(). * * This function simply calls the BUS_ALLOC_RESOURCE() method of the * parent of @p dev. */ struct resource * bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (dev->parent == NULL) return (NULL); res = BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end, count, flags); return (res); } /** * @brief Wrapper function for BUS_ADJUST_RESOURCE(). * * This function simply calls the BUS_ADJUST_RESOURCE() method of the * parent of @p dev. */ int bus_adjust_resource(device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { if (dev->parent == NULL) return (EINVAL); return (BUS_ADJUST_RESOURCE(dev->parent, dev, r, start, end)); } int bus_adjust_resource_old(device_t dev, int type __unused, struct resource *r, rman_res_t start, rman_res_t end) { return (bus_adjust_resource(dev, r, start, end)); } /** * @brief Wrapper function for BUS_TRANSLATE_RESOURCE(). * * This function simply calls the BUS_TRANSLATE_RESOURCE() method of the * parent of @p dev. */ int bus_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart) { if (dev->parent == NULL) return (EINVAL); return (BUS_TRANSLATE_RESOURCE(dev->parent, type, start, newstart)); } /** * @brief Wrapper function for BUS_ACTIVATE_RESOURCE(). * * This function simply calls the BUS_ACTIVATE_RESOURCE() method of the * parent of @p dev. 
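 */

/*
 * A consumer sketch, not part of this change: the resource_spec helpers
 * above let a driver declare its resources once and share the error
 * unwinding.  The "mydev" names are hypothetical.
 */
static struct resource_spec mydev_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* register window */
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static int
mydev_alloc_example(device_t dev, struct resource **res)
{
	int error;

	/* res[] needs one slot per entry in mydev_res_spec. */
	error = bus_alloc_resources(dev, mydev_res_spec, res);
	if (error != 0)
		return (error);
	/* ... use res[0] and res[1]; release them again on detach ... */
	bus_release_resources(dev, mydev_res_spec, res);
	return (0);
}

/*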
*/ int -bus_activate_resource(device_t dev, int type, int rid, struct resource *r) +bus_activate_resource(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); - return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); + return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, r)); } int -bus_activate_resource_new(device_t dev, struct resource *r) +bus_activate_resource_old(device_t dev, int type, int rid, struct resource *r) { - return (bus_activate_resource(dev, rman_get_type(r), rman_get_rid(r), - r)); + return (bus_activate_resource(dev, r)); } /** * @brief Wrapper function for BUS_DEACTIVATE_RESOURCE(). * * This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the * parent of @p dev. */ int -bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r) +bus_deactivate_resource(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); - return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); + return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, r)); } int -bus_deactivate_resource_new(device_t dev, struct resource *r) +bus_deactivate_resource_old(device_t dev, int type, int rid, struct resource *r) { - return (bus_deactivate_resource(dev, rman_get_type(r), rman_get_rid(r), - r)); + return (bus_deactivate_resource(dev, r)); } /** * @brief Wrapper function for BUS_MAP_RESOURCE(). * * This function simply calls the BUS_MAP_RESOURCE() method of the * parent of @p dev. */ int bus_map_resource(device_t dev, struct resource *r, struct resource_map_request *args, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_MAP_RESOURCE(dev->parent, dev, r, args, map)); } int bus_map_resource_old(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { return (bus_map_resource(dev, r, args, map)); } /** * @brief Wrapper function for BUS_UNMAP_RESOURCE(). * * This function simply calls the BUS_UNMAP_RESOURCE() method of the * parent of @p dev. */ int bus_unmap_resource(device_t dev, struct resource *r, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_UNMAP_RESOURCE(dev->parent, dev, r, map)); } int bus_unmap_resource_old(device_t dev, int type, struct resource *r, struct resource_map *map) { return (bus_unmap_resource(dev, r, map)); } /** * @brief Wrapper function for BUS_RELEASE_RESOURCE(). * * This function simply calls the BUS_RELEASE_RESOURCE() method of the * parent of @p dev. */ int bus_release_resource(device_t dev, int type, int rid, struct resource *r) { int rv; if (dev->parent == NULL) return (EINVAL); rv = BUS_RELEASE_RESOURCE(dev->parent, dev, type, rid, r); return (rv); } int bus_release_resource_new(device_t dev, struct resource *r) { return (bus_release_resource(dev, rman_get_type(r), rman_get_rid(r), r)); } /** * @brief Wrapper function for BUS_SETUP_INTR(). * * This function simply calls the BUS_SETUP_INTR() method of the * parent of @p dev. */ int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { int error; if (dev->parent == NULL) return (EINVAL); error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler, arg, cookiep); if (error != 0) return (error); if (handler != NULL && !(flags & INTR_MPSAFE)) device_printf(dev, "[GIANT-LOCKED]\n"); return (0); } /** * @brief Wrapper function for BUS_TEARDOWN_INTR(). * * This function simply calls the BUS_TEARDOWN_INTR() method of the * parent of @p dev. 
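 */

/*
 * A brief sketch, not part of this change: with the updated KPI the
 * resource carries its own type and rid, so activation needs only the
 * resource itself.  The "mydev" helper is hypothetical.
 */
static int
mydev_activate_example(device_t dev, struct resource *r)
{
	int error;

	error = bus_activate_resource(dev, r); /* was (dev, type, rid, r) */
	if (error != 0)
		return (error);
	(void)bus_read_4(r, 0);	/* the register window is now mapped */
	return (0);
}

/*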
*/ int bus_teardown_intr(device_t dev, struct resource *r, void *cookie) { if (dev->parent == NULL) return (EINVAL); return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie)); } /** * @brief Wrapper function for BUS_SUSPEND_INTR(). * * This function simply calls the BUS_SUSPEND_INTR() method of the * parent of @p dev. */ int bus_suspend_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_SUSPEND_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_RESUME_INTR(). * * This function simply calls the BUS_RESUME_INTR() method of the * parent of @p dev. */ int bus_resume_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_RESUME_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_BIND_INTR(). * * This function simply calls the BUS_BIND_INTR() method of the * parent of @p dev. */ int bus_bind_intr(device_t dev, struct resource *r, int cpu) { if (dev->parent == NULL) return (EINVAL); return (BUS_BIND_INTR(dev->parent, dev, r, cpu)); } /** * @brief Wrapper function for BUS_DESCRIBE_INTR(). * * This function first formats the requested description into a * temporary buffer and then calls the BUS_DESCRIBE_INTR() method of * the parent of @p dev. */ int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) { va_list ap; char descr[MAXCOMLEN + 1]; if (dev->parent == NULL) return (EINVAL); va_start(ap, fmt); vsnprintf(descr, sizeof(descr), fmt, ap); va_end(ap); return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr)); } /** * @brief Wrapper function for BUS_SET_RESOURCE(). * * This function simply calls the BUS_SET_RESOURCE() method of the * parent of @p dev. */ int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count) { return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid, start, count)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev. */ int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp) { return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, startp, countp)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the start value. */ rman_res_t bus_get_resource_start(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (start); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the count value. */ rman_res_t bus_get_resource_count(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (count); } /** * @brief Wrapper function for BUS_DELETE_RESOURCE(). * * This function simply calls the BUS_DELETE_RESOURCE() method of the * parent of @p dev. */ void bus_delete_resource(device_t dev, int type, int rid) { BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid); } /** * @brief Wrapper function for BUS_CHILD_PRESENT(). * * This function simply calls the BUS_CHILD_PRESENT() method of the * parent of @p dev. 
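 */

/*
 * A usage sketch, not part of this change, for the interrupt wrappers
 * above: install a handler, give it a human-readable name, then tear it
 * down.  The "mydev" names are hypothetical.
 */
static void
mydev_intr(void *arg)
{
	/* acknowledge and handle the device interrupt (hypothetical) */
}

static int
mydev_irq_example(device_t dev, struct resource *irq, void **cookiep)
{
	int error;

	error = bus_setup_intr(dev, irq, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, mydev_intr, device_get_softc(dev), cookiep);
	if (error != 0)
		return (error);
	/* The description shows up in e.g. vmstat -i output. */
	(void)bus_describe_intr(dev, irq, *cookiep, "%s", "main");
	return (bus_teardown_intr(dev, irq, *cookiep));
}

/*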
*/ int bus_child_present(device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(child), child)); } /** * @brief Wrapper function for BUS_CHILD_PNPINFO(). * * This function simply calls the BUS_CHILD_PNPINFO() method of the parent of @p * dev. */ int bus_child_pnpinfo(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (0); return (BUS_CHILD_PNPINFO(parent, child, sb)); } /** * @brief Generic implementation that does nothing for bus_child_pnpinfo * * This function has the right signature and returns 0 since the sbuf is passed * to us to append to. */ int bus_generic_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { return (0); } /** * @brief Wrapper function for BUS_CHILD_LOCATION(). * * This function simply calls the BUS_CHILD_LOCATION() method of the parent of * @p dev. */ int bus_child_location(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (0); return (BUS_CHILD_LOCATION(parent, child, sb)); } /** * @brief Generic implementation that does nothing for bus_child_location * * This function has the right signature and returns 0 since the sbuf is passed * to us to append to. */ int bus_generic_child_location(device_t dev, device_t child, struct sbuf *sb) { return (0); } /** * @brief Wrapper function for BUS_GET_CPUS(). * * This function simply calls the BUS_GET_CPUS() method of the * parent of @p dev. */ int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (EINVAL); return (BUS_GET_CPUS(parent, dev, op, setsize, cpuset)); } /** * @brief Wrapper function for BUS_GET_DMA_TAG(). * * This function simply calls the BUS_GET_DMA_TAG() method of the * parent of @p dev. */ bus_dma_tag_t bus_get_dma_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (NULL); return (BUS_GET_DMA_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_BUS_TAG(). * * This function simply calls the BUS_GET_BUS_TAG() method of the * parent of @p dev. */ bus_space_tag_t bus_get_bus_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return ((bus_space_tag_t)0); return (BUS_GET_BUS_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_DOMAIN(). * * This function simply calls the BUS_GET_DOMAIN() method of the * parent of @p dev. */ int bus_get_domain(device_t dev, int *domain) { return (BUS_GET_DOMAIN(device_get_parent(dev), dev, domain)); } /* Resume all devices and then notify userland that we're up again. */ static int root_resume(device_t dev) { int error; error = bus_generic_resume(dev); if (error == 0) { devctl_notify("kernel", "power", "resume", NULL); } return (error); } static int root_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += printf("\n"); return (retval); } static int root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* * If an interrupt mapping gets to here something bad has happened. */ panic("root_setup_intr"); } /* * If we get here, assume that the device is permanent and really is * present in the system. Removable bus drivers are expected to intercept * this call long before it gets here. 
We return -1 so that drivers that * really care can check vs -1 or some ERRNO returned higher in the food * chain. */ static int root_child_present(device_t dev, device_t child) { return (-1); } static int root_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { switch (op) { case INTR_CPUS: /* Default to returning the set of all CPUs. */ if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = all_cpus; return (0); default: return (EINVAL); } } static kobj_method_t root_methods[] = { /* Device interface */ KOBJMETHOD(device_shutdown, bus_generic_shutdown), KOBJMETHOD(device_suspend, bus_generic_suspend), KOBJMETHOD(device_resume, root_resume), /* Bus interface */ KOBJMETHOD(bus_print_child, root_print_child), KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar), KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar), KOBJMETHOD(bus_setup_intr, root_setup_intr), KOBJMETHOD(bus_child_present, root_child_present), KOBJMETHOD(bus_get_cpus, root_get_cpus), KOBJMETHOD_END }; static driver_t root_driver = { "root", root_methods, 1, /* no softc */ }; device_t root_bus; devclass_t root_devclass; static int root_bus_module_handler(module_t mod, int what, void* arg) { switch (what) { case MOD_LOAD: TAILQ_INIT(&bus_data_devices); kobj_class_compile((kobj_class_t) &root_driver); root_bus = make_device(NULL, "root", 0); root_bus->desc = "System root bus"; kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver); root_bus->driver = &root_driver; root_bus->state = DS_ATTACHED; root_devclass = devclass_find_internal("root", NULL, FALSE); devctl2_init(); return (0); case MOD_SHUTDOWN: device_shutdown(root_bus); return (0); default: return (EOPNOTSUPP); } return (0); } static moduledata_t root_bus_mod = { "rootbus", root_bus_module_handler, NULL }; DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); /** * @brief Automatically configure devices * * This function begins the autoconfiguration process by calling * device_probe_and_attach() for each child of the @c root0 device. */ void root_bus_configure(void) { PDEBUG((".")); /* Eventually this will be split up, but this is sufficient for now. */ bus_set_pass(BUS_PASS_DEFAULT); } /** * @brief Module handler for registering device drivers * * This module handler is used to automatically register device * drivers when modules are loaded. 
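 *
 * (Illustrative aside: drivers rarely invoke this handler directly; the
 * DRIVER_MODULE() macro generates the driver_module_data and registers
 * it, e.g.
 *
 *	DRIVER_MODULE(mydrv, pci, mydrv_driver, NULL, NULL);
 *
 * for a hypothetical "mydrv" driver on the pci bus.)
 *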
If @p what is MOD_LOAD, it calls * devclass_add_driver() for the driver described by the * driver_module_data structure pointed to by @p arg */ int driver_module_handler(module_t mod, int what, void *arg) { struct driver_module_data *dmd; devclass_t bus_devclass; kobj_class_t driver; int error, pass; dmd = (struct driver_module_data *)arg; bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE); error = 0; switch (what) { case MOD_LOAD: if (dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); pass = dmd->dmd_pass; driver = dmd->dmd_driver; PDEBUG(("Loading module: driver %s on bus %s (pass %d)", DRIVERNAME(driver), dmd->dmd_busname, pass)); error = devclass_add_driver(bus_devclass, driver, pass, dmd->dmd_devclass); break; case MOD_UNLOAD: PDEBUG(("Unloading module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_delete_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; case MOD_QUIESCE: PDEBUG(("Quiesce module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_quiesce_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; default: error = EOPNOTSUPP; break; } return (error); } /** * @brief Enumerate all hinted devices for this bus. * * Walks through the hints for this bus and calls the bus_hinted_child * routine for each one it finds. It searches first for the specific * bus that's being probed for hinted children (eg isa0), and then for * generic children (eg isa). * * @param bus bus device to enumerate */ void bus_enumerate_hinted_children(device_t bus) { int i; const char *dname, *busname; int dunit; /* * enumerate all devices on the specific bus */ busname = device_get_nameunit(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); /* * and all the generic ones. */ busname = device_get_name(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); } #ifdef BUS_DEBUG /* the _short versions avoid iteration by not calling anything that prints * more than oneliners. I love oneliners. */ static void print_device_short(device_t dev, int indent) { if (!dev) return; indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n", dev->unit, dev->desc, (dev->parent? "":"no "), (TAILQ_EMPTY(&dev->children)? "no ":""), (dev->flags&DF_ENABLED? "enabled,":"disabled,"), (dev->flags&DF_FIXEDCLASS? "fixed,":""), (dev->flags&DF_WILDCARD? "wildcard,":""), (dev->flags&DF_DESCMALLOCED? "descmalloced,":""), (dev->flags&DF_SUSPENDED? "suspended,":""), (dev->ivars? "":"no "), (dev->softc?
"":"no "), dev->busy)); } static void print_device(device_t dev, int indent) { if (!dev) return; print_device_short(dev, indent); indentprintf(("Parent:\n")); print_device_short(dev->parent, indent+1); indentprintf(("Driver:\n")); print_driver_short(dev->driver, indent+1); indentprintf(("Devclass:\n")); print_devclass_short(dev->devclass, indent+1); } void print_device_tree_short(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device_short(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree_short(child, indent+1); } } void print_device_tree(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree(child, indent+1); } } static void print_driver_short(driver_t *driver, int indent) { if (!driver) return; indentprintf(("driver %s: softc size = %zd\n", driver->name, driver->size)); } static void print_driver(driver_t *driver, int indent) { if (!driver) return; print_driver_short(driver, indent); } static void print_driver_list(driver_list_t drivers, int indent) { driverlink_t driver; TAILQ_FOREACH(driver, &drivers, link) { print_driver(driver->driver, indent); } } static void print_devclass_short(devclass_t dc, int indent) { if ( !dc ) return; indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit)); } static void print_devclass(devclass_t dc, int indent) { int i; if ( !dc ) return; print_devclass_short(dc, indent); indentprintf(("Drivers:\n")); print_driver_list(dc->drivers, indent+1); indentprintf(("Devices:\n")); for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) print_device(dc->devices[i], indent+1); } void print_devclass_list_short(void) { devclass_t dc; printf("Short listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass_short(dc, 0); } } void print_devclass_list(void) { devclass_t dc; printf("Full listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass(dc, 0); } } #endif /* * User-space access to the device tree. * * We implement a small set of nodes: * * hw.bus Single integer read method to obtain the * current generation count. * hw.bus.devices Reads the entire device tree in flat space. * hw.bus.rman Resource manager interface * * We might like to add the ability to scan devclasses and/or drivers to * determine what else is currently loaded/available. */ static int sysctl_bus_info(SYSCTL_HANDLER_ARGS) { struct u_businfo ubus; ubus.ub_version = BUS_USER_VERSION; ubus.ub_generation = bus_data_generation; return (SYSCTL_OUT(req, &ubus, sizeof(ubus))); } SYSCTL_PROC(_hw_bus, OID_AUTO, info, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_bus_info, "S,u_businfo", "bus-related data"); static int sysctl_devices(SYSCTL_HANDLER_ARGS) { struct sbuf sb; int *name = (int *)arg1; u_int namelen = arg2; int index; device_t dev; struct u_device *udev; int error; if (namelen != 2) return (EINVAL); if (bus_data_generation_check(name[0])) return (EINVAL); index = name[1]; /* * Scan the list of devices, looking for the requested index. */ TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (index-- == 0) break; } if (dev == NULL) return (ENOENT); /* * Populate the return item, careful not to overflow the buffer. 
*/ udev = malloc(sizeof(*udev), M_BUS, M_WAITOK | M_ZERO); if (udev == NULL) return (ENOMEM); udev->dv_handle = (uintptr_t)dev; udev->dv_parent = (uintptr_t)dev->parent; udev->dv_devflags = dev->devflags; udev->dv_flags = dev->flags; udev->dv_state = dev->state; sbuf_new(&sb, udev->dv_fields, sizeof(udev->dv_fields), SBUF_FIXEDLEN); if (dev->nameunit != NULL) sbuf_cat(&sb, dev->nameunit); sbuf_putc(&sb, '\0'); if (dev->desc != NULL) sbuf_cat(&sb, dev->desc); sbuf_putc(&sb, '\0'); if (dev->driver != NULL) sbuf_cat(&sb, dev->driver->name); sbuf_putc(&sb, '\0'); bus_child_pnpinfo(dev, &sb); sbuf_putc(&sb, '\0'); bus_child_location(dev, &sb); sbuf_putc(&sb, '\0'); error = sbuf_finish(&sb); if (error == 0) error = SYSCTL_OUT(req, udev, sizeof(*udev)); sbuf_delete(&sb); free(udev, M_BUS); return (error); } SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD | CTLFLAG_NEEDGIANT, sysctl_devices, "system device tree"); int bus_data_generation_check(int generation) { if (generation != bus_data_generation) return (1); /* XXX generate optimised lists here? */ return (0); } void bus_data_generation_update(void) { atomic_add_int(&bus_data_generation, 1); } int bus_free_resource(device_t dev, int type, struct resource *r) { if (r == NULL) return (0); return (bus_release_resource(dev, type, rman_get_rid(r), r)); } device_t device_lookup_by_name(const char *name) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (dev->nameunit != NULL && strcmp(dev->nameunit, name) == 0) return (dev); } return (NULL); } /* * /dev/devctl2 implementation. The existing /dev/devctl device has * implicit semantics on open, so it could not be reused for this. * Another option would be to call this /dev/bus? */ static int find_device(struct devreq *req, device_t *devp) { device_t dev; /* * First, ensure that the name is nul terminated. */ if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL) return (EINVAL); /* * Second, try to find an attached device whose name matches * 'name'. */ dev = device_lookup_by_name(req->dr_name); if (dev != NULL) { *devp = dev; return (0); } /* Finally, give device enumerators a chance. */ dev = NULL; EVENTHANDLER_DIRECT_INVOKE(dev_lookup, req->dr_name, &dev); if (dev == NULL) return (ENOENT); *devp = dev; return (0); } static bool driver_exists(device_t bus, const char *driver) { devclass_t dc; for (dc = bus->devclass; dc != NULL; dc = dc->parent) { if (devclass_find_driver_internal(dc, driver) != NULL) return (true); } return (false); } static void device_gen_nomatch(device_t dev) { device_t child; if (dev->flags & DF_NEEDNOMATCH && dev->state == DS_NOTPRESENT) { device_handle_nomatch(dev); } dev->flags &= ~DF_NEEDNOMATCH; TAILQ_FOREACH(child, &dev->children, link) { device_gen_nomatch(child); } } static void device_do_deferred_actions(void) { devclass_t dc; driverlink_t dl; /* * Walk through the devclasses to find all the drivers we've tagged as * deferred during the freeze and call the driver added routines. They * have already been added to the lists in the background, so the driver * added routines that trigger a probe will have all the right bidders * for the probe auction. */ TAILQ_FOREACH(dc, &devclasses, link) { TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->flags & DL_DEFERRED_PROBE) { devclass_driver_added(dc, dl->driver); dl->flags &= ~DL_DEFERRED_PROBE; } } } /* * We also defer no-match events during a freeze. Walk the tree and * generate all the pent-up events that are still relevant. 
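 *
 * (Illustrative aside: userland drives this via the DEV_FREEZE and
 * DEV_THAW ioctls handled below, which is roughly what devctl(8)'s
 * "freeze" and "thaw" commands do:
 *
 *	struct devreq req = { 0 };
 *	int fd = open("/dev/devctl2", O_RDONLY);
 *
 *	ioctl(fd, DEV_FREEZE, &req);
 *	... load driver modules; probes are deferred ...
 *	ioctl(fd, DEV_THAW, &req);
 *
 * File descriptor and error checks omitted for brevity.)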
*/ device_gen_nomatch(root_bus); bus_data_generation_update(); } static int device_get_path(device_t dev, const char *locator, struct sbuf *sb) { device_t parent; int error; KASSERT(sb != NULL, ("sb is NULL")); parent = device_get_parent(dev); if (parent == NULL) { error = sbuf_putc(sb, '/'); } else { error = BUS_GET_DEVICE_PATH(parent, dev, locator, sb); if (error == 0) { error = sbuf_error(sb); if (error == 0 && sbuf_len(sb) <= 1) error = EIO; } } sbuf_finish(sb); return (error); } static int devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct devreq *req; device_t dev; int error, old; /* Locate the device to control. */ bus_topo_lock(); req = (struct devreq *)data; switch (cmd) { case DEV_ATTACH: case DEV_DETACH: case DEV_ENABLE: case DEV_DISABLE: case DEV_SUSPEND: case DEV_RESUME: case DEV_SET_DRIVER: case DEV_CLEAR_DRIVER: case DEV_RESCAN: case DEV_DELETE: case DEV_RESET: error = priv_check(td, PRIV_DRIVER); if (error == 0) error = find_device(req, &dev); break; case DEV_FREEZE: case DEV_THAW: error = priv_check(td, PRIV_DRIVER); break; case DEV_GET_PATH: error = find_device(req, &dev); break; default: error = ENOTTY; break; } if (error) { bus_topo_unlock(); return (error); } /* Perform the requested operation. */ switch (cmd) { case DEV_ATTACH: if (device_is_attached(dev)) error = EBUSY; else if (!device_is_enabled(dev)) error = ENXIO; else error = device_probe_and_attach(dev); break; case DEV_DETACH: if (!device_is_attached(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } error = device_detach(dev); break; case DEV_ENABLE: if (device_is_enabled(dev)) { error = EBUSY; break; } /* * If the device has been probed but not attached (e.g. * when it has been disabled by a loader hint), just * attach the device rather than doing a full probe. */ device_enable(dev); if (device_is_alive(dev)) { /* * If the device was disabled via a hint, clear * the hint. */ if (resource_disabled(dev->driver->name, dev->unit)) resource_unset_value(dev->driver->name, dev->unit, "disabled"); error = device_attach(dev); } else error = device_probe_and_attach(dev); break; case DEV_DISABLE: if (!device_is_enabled(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } /* * Force DF_FIXEDCLASS on around detach to preserve * the existing name. */ old = dev->flags; dev->flags |= DF_FIXEDCLASS; error = device_detach(dev); if (!(old & DF_FIXEDCLASS)) dev->flags &= ~DF_FIXEDCLASS; if (error == 0) device_disable(dev); break; case DEV_SUSPEND: if (device_is_suspended(dev)) { error = EBUSY; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev); break; case DEV_RESUME: if (!device_is_suspended(dev)) { error = EINVAL; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_RESUME_CHILD(device_get_parent(dev), dev); break; case DEV_SET_DRIVER: { devclass_t dc; char driver[128]; error = copyinstr(req->dr_data, driver, sizeof(driver), NULL); if (error) break; if (driver[0] == '\0') { error = EINVAL; break; } if (dev->devclass != NULL && strcmp(driver, dev->devclass->name) == 0) /* XXX: Could possibly force DF_FIXEDCLASS on? */ break; /* * Scan drivers for this device's bus looking for at * least one matching driver. 
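 *
 * (Illustrative aside: this case backs devctl(8)'s "set driver"
 * command, e.g. "devctl set driver -f em0 mydrv" with a hypothetical
 * driver name; -f maps to DEVF_SET_DRIVER_DETACH so an attached
 * device is detached first.)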
*/ if (dev->parent == NULL) { error = EINVAL; break; } if (!driver_exists(dev->parent, driver)) { error = ENOENT; break; } dc = devclass_create(driver); if (dc == NULL) { error = ENOMEM; break; } /* Detach device if necessary. */ if (device_is_attached(dev)) { if (req->dr_flags & DEVF_SET_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } /* Clear any previously-fixed device class and unit. */ if (dev->flags & DF_FIXEDCLASS) devclass_delete_device(dev->devclass, dev); dev->flags |= DF_WILDCARD; dev->unit = -1; /* Force the new device class. */ error = devclass_add_device(dc, dev); if (error) break; dev->flags |= DF_FIXEDCLASS; error = device_probe_and_attach(dev); break; } case DEV_CLEAR_DRIVER: if (!(dev->flags & DF_FIXEDCLASS)) { error = 0; break; } if (device_is_attached(dev)) { if (req->dr_flags & DEVF_CLEAR_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } dev->flags &= ~DF_FIXEDCLASS; dev->flags |= DF_WILDCARD; devclass_delete_device(dev->devclass, dev); error = device_probe_and_attach(dev); break; case DEV_RESCAN: if (!device_is_attached(dev)) { error = ENXIO; break; } error = BUS_RESCAN(dev); break; case DEV_DELETE: { device_t parent; parent = device_get_parent(dev); if (parent == NULL) { error = EINVAL; break; } if (!(req->dr_flags & DEVF_FORCE_DELETE)) { if (bus_child_present(dev) != 0) { error = EBUSY; break; } } error = device_delete_child(parent, dev); break; } case DEV_FREEZE: if (device_frozen) error = EBUSY; else device_frozen = true; break; case DEV_THAW: if (!device_frozen) error = EBUSY; else { device_do_deferred_actions(); device_frozen = false; } break; case DEV_RESET: if ((req->dr_flags & ~(DEVF_RESET_DETACH)) != 0) { error = EINVAL; break; } error = BUS_RESET_CHILD(device_get_parent(dev), dev, req->dr_flags); break; case DEV_GET_PATH: { struct sbuf *sb; char locator[64]; ssize_t len; error = copyinstr(req->dr_buffer.buffer, locator, sizeof(locator), NULL); if (error != 0) break; sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND | SBUF_INCLUDENUL /* | SBUF_WAITOK */); error = device_get_path(dev, locator, sb); if (error == 0) { len = sbuf_len(sb); if (req->dr_buffer.length < len) { error = ENAMETOOLONG; } else { error = copyout(sbuf_data(sb), req->dr_buffer.buffer, len); } req->dr_buffer.length = len; } sbuf_delete(sb); break; } } bus_topo_unlock(); return (error); } static struct cdevsw devctl2_cdevsw = { .d_version = D_VERSION, .d_ioctl = devctl2_ioctl, .d_name = "devctl2", }; static void devctl2_init(void) { make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0644, "devctl2"); } /* * For maintaining device 'at' location info to avoid recomputing it */ struct device_location_node { const char *dln_locator; const char *dln_path; TAILQ_ENTRY(device_location_node) dln_link; }; typedef TAILQ_HEAD(device_location_list, device_location_node) device_location_list_t; struct device_location_cache { device_location_list_t dlc_list; }; /* * Location cache for wired devices. 
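 *
 * (Usage sketch: a consumer typically brackets a series of lookups,
 * e.g.
 *
 *	device_location_cache_t *cache = dev_wired_cache_init();
 *	bool hit = dev_wired_cache_match(cache, dev, "ACPI:\\_SB_.PCI0");
 *	dev_wired_cache_fini(cache);
 *
 * where the "locator:path" hint string shown is hypothetical.)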
*/ device_location_cache_t * dev_wired_cache_init(void) { device_location_cache_t *dcp; dcp = malloc(sizeof(*dcp), M_BUS, M_WAITOK | M_ZERO); TAILQ_INIT(&dcp->dlc_list); return (dcp); } void dev_wired_cache_fini(device_location_cache_t *dcp) { struct device_location_node *dln, *tdln; TAILQ_FOREACH_SAFE(dln, &dcp->dlc_list, dln_link, tdln) { free(dln, M_BUS); } free(dcp, M_BUS); } static struct device_location_node * dev_wired_cache_lookup(device_location_cache_t *dcp, const char *locator) { struct device_location_node *dln; TAILQ_FOREACH(dln, &dcp->dlc_list, dln_link) { if (strcmp(locator, dln->dln_locator) == 0) return (dln); } return (NULL); } static struct device_location_node * dev_wired_cache_add(device_location_cache_t *dcp, const char *locator, const char *path) { struct device_location_node *dln; size_t loclen, pathlen; loclen = strlen(locator) + 1; pathlen = strlen(path) + 1; dln = malloc(sizeof(*dln) + loclen + pathlen, M_BUS, M_WAITOK | M_ZERO); dln->dln_locator = (char *)(dln + 1); memcpy(__DECONST(char *, dln->dln_locator), locator, loclen); dln->dln_path = dln->dln_locator + loclen; memcpy(__DECONST(char *, dln->dln_path), path, pathlen); TAILQ_INSERT_HEAD(&dcp->dlc_list, dln, dln_link); return (dln); } bool dev_wired_cache_match(device_location_cache_t *dcp, device_t dev, const char *at) { struct sbuf *sb; const char *cp; char locator[32]; int error, len; struct device_location_node *res; cp = strchr(at, ':'); if (cp == NULL) return (false); len = cp - at; if (len > sizeof(locator) - 1) /* Skip too long locator */ return (false); memcpy(locator, at, len); locator[len] = '\0'; cp++; error = 0; /* maybe cache this inside device_t and look that up, but not yet */ res = dev_wired_cache_lookup(dcp, locator); if (res == NULL) { sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND | SBUF_INCLUDENUL | SBUF_NOWAIT); if (sb != NULL) { error = device_get_path(dev, locator, sb); if (error == 0) { res = dev_wired_cache_add(dcp, locator, sbuf_data(sb)); } sbuf_delete(sb); } } if (error != 0 || res == NULL || res->dln_path == NULL) return (false); return (strcmp(res->dln_path, cp) == 0); } /* * APIs to manage deprecation and obsolescence. 
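 *
 * (Usage sketch: callers normally use the gone_in()/gone_in_dev()
 * wrapper macros from <sys/systm.h>, e.g.
 *
 *	gone_in_dev(dev, 16, "hypothetical legacy ioctl interface");
 *
 * to warn that a feature is scheduled for removal in FreeBSD 16.)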
*/ static int obsolete_panic = 0; SYSCTL_INT(_debug, OID_AUTO, obsolete_panic, CTLFLAG_RWTUN, &obsolete_panic, 0, "Panic when obsolete features are used (0 = never, 1 = if obsolete, " "2 = if deprecated)"); static void gone_panic(int major, int running, const char *msg) { switch (obsolete_panic) { case 0: return; case 1: if (running < major) return; /* FALLTHROUGH */ default: panic("%s", msg); } } void _gone_in(int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) printf("Obsolete code will be removed soon: %s\n", msg); else printf("Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); } void _gone_in_dev(device_t dev, int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) device_printf(dev, "Obsolete code will be removed soon: %s\n", msg); else device_printf(dev, "Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); } #ifdef DDB DB_SHOW_COMMAND(device, db_show_device) { device_t dev; if (!have_addr) return; dev = (device_t)addr; db_printf("name: %s\n", device_get_nameunit(dev)); db_printf(" driver: %s\n", DRIVERNAME(dev->driver)); db_printf(" class: %s\n", DEVCLANAME(dev->devclass)); db_printf(" addr: %p\n", dev); db_printf(" parent: %p\n", dev->parent); db_printf(" softc: %p\n", dev->softc); db_printf(" ivars: %p\n", dev->ivars); } DB_SHOW_ALL_COMMAND(devices, db_show_all_devices) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { db_show_device((db_expr_t)dev, true, count, modif); } } #endif diff --git a/sys/powerpc/mpc85xx/lbc.c b/sys/powerpc/mpc85xx/lbc.c index 20f0baf8c395..6884df611494 100644 --- a/sys/powerpc/mpc85xx/lbc.c +++ b/sys/powerpc/mpc85xx/lbc.c @@ -1,939 +1,928 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2006-2008, Juniper Networks, Inc. * Copyright (c) 2008 Semihalf, Rafal Czubak * Copyright (c) 2009 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_platform.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/bus.h> #include <sys/kernel.h> #include <sys/malloc.h> #include <sys/module.h> #include <sys/rman.h> #include <machine/bus.h> #include <vm/vm.h> #include <vm/pmap.h> #include <dev/fdt/fdt_common.h> #include <dev/ofw/ofw_bus.h> #include <dev/ofw/ofw_bus_subr.h> #include <powerpc/mpc85xx/mpc85xx.h> #include "ofw_bus_if.h" #include "lbc.h" #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif static MALLOC_DEFINE(M_LBC, "localbus", "localbus devices information"); static int lbc_probe(device_t); static int lbc_attach(device_t); static int lbc_shutdown(device_t); static int lbc_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int lbc_unmap_resource(device_t, device_t, struct resource *, struct resource_map *map); static int lbc_activate_resource(device_t bus, device_t child, - int type, int rid, struct resource *r); -static int lbc_deactivate_resource(device_t bus, - device_t child, int type __unused, int rid, + struct resource *r); +static int lbc_deactivate_resource(device_t bus, device_t child, struct resource *r); static struct rman *lbc_get_rman(device_t, int, u_int); static struct resource *lbc_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int lbc_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int lbc_print_child(device_t, device_t); static int lbc_release_resource(device_t, device_t, int, int, struct resource *); static const struct ofw_bus_devinfo *lbc_get_devinfo(device_t, device_t); /* * Bus interface definition */ static device_method_t lbc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lbc_probe), DEVMETHOD(device_attach, lbc_attach), DEVMETHOD(device_shutdown, lbc_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, lbc_print_child), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, NULL), DEVMETHOD(bus_get_rman, lbc_get_rman), DEVMETHOD(bus_alloc_resource, lbc_alloc_resource), DEVMETHOD(bus_adjust_resource, lbc_adjust_resource), DEVMETHOD(bus_release_resource, lbc_release_resource), DEVMETHOD(bus_activate_resource, lbc_activate_resource), DEVMETHOD(bus_deactivate_resource, lbc_deactivate_resource), DEVMETHOD(bus_map_resource, lbc_map_resource), DEVMETHOD(bus_unmap_resource, lbc_unmap_resource), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_devinfo, lbc_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t lbc_driver = { "lbc", lbc_methods, sizeof(struct lbc_softc) }; EARLY_DRIVER_MODULE(lbc, ofwbus, lbc_driver, 0, 0, BUS_PASS_BUS); /* * Calculate address mask used by OR(n) registers. Use memory region size to * determine mask value. The size must be a power of two and within the range * of 32KB - 4GB. Otherwise an error code is returned. Value representing * 4GB size can be passed as 0xffffffff.
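 *
 * (Worked examples: 32KB (1 << 15) maps to 0xffff8000, 64KB to
 * 0xffff0000, 1MB to 0xfff00000, and a size argument of 0xffffffff
 * yields a mask of 0.)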
*/ static uint32_t lbc_address_mask(uint32_t size) { int n = 15; if (size == ~0) return (0); while (n < 32) { if (size == (1U << n)) break; n++; } if (n == 32) return (EINVAL); return (0xffff8000 << (n - 15)); } static void lbc_banks_unmap(struct lbc_softc *sc) { int r; r = 0; while (r < LBC_DEV_MAX) { if (sc->sc_range[r].size == 0) return; pmap_unmapdev((void *)sc->sc_range[r].kva, sc->sc_range[r].size); law_disable(OCP85XX_TGTIF_LBC, sc->sc_range[r].addr, sc->sc_range[r].size); r++; } } static int lbc_banks_map(struct lbc_softc *sc) { vm_paddr_t end, start; vm_size_t size; u_int i, r, ranges, s; int error; bzero(sc->sc_range, sizeof(sc->sc_range)); /* * Determine number of discontiguous address ranges to program. */ ranges = 0; for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; start = sc->sc_banks[i].addr; for (r = 0; r < ranges; r++) { /* Avoid wrap-around bugs. */ end = sc->sc_range[r].addr - 1 + sc->sc_range[r].size; if (start > 0 && end == start - 1) { sc->sc_range[r].size += size; break; } /* Avoid wrap-around bugs. */ end = start - 1 + size; if (sc->sc_range[r].addr > 0 && end == sc->sc_range[r].addr - 1) { sc->sc_range[r].addr = start; sc->sc_range[r].size += size; break; } } if (r == ranges) { /* New range; add using insertion sort */ r = 0; while (r < ranges && sc->sc_range[r].addr < start) r++; for (s = ranges; s > r; s--) sc->sc_range[s] = sc->sc_range[s-1]; sc->sc_range[r].addr = start; sc->sc_range[r].size = size; ranges++; } } /* * Ranges are sorted so quickly go over the list to merge ranges * that grew toward each other while building the ranges. */ r = 0; while (r < ranges - 1) { end = sc->sc_range[r].addr + sc->sc_range[r].size; if (end != sc->sc_range[r+1].addr) { r++; continue; } sc->sc_range[r].size += sc->sc_range[r+1].size; for (s = r + 1; s < ranges - 1; s++) sc->sc_range[s] = sc->sc_range[s+1]; bzero(&sc->sc_range[s], sizeof(sc->sc_range[s])); ranges--; } /* * Configure LAW for the LBC ranges and map the physical memory * range into KVA. */ for (r = 0; r < ranges; r++) { start = sc->sc_range[r].addr; size = sc->sc_range[r].size; error = law_enable(OCP85XX_TGTIF_LBC, start, size); if (error) return (error); sc->sc_range[r].kva = (vm_offset_t)pmap_mapdev(start, size); } /* XXX: need something better here? */ if (ranges == 0) return (EINVAL); /* Assign KVA to banks based on the enclosing range. */ for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; start = sc->sc_banks[i].addr; for (r = 0; r < ranges; r++) { end = sc->sc_range[r].addr - 1 + sc->sc_range[r].size; if (start >= sc->sc_range[r].addr && start - 1 + size <= end) break; } if (r < ranges) { sc->sc_banks[i].kva = sc->sc_range[r].kva + (start - sc->sc_range[r].addr); } } return (0); } static int lbc_banks_enable(struct lbc_softc *sc) { uint32_t size; uint32_t regval; int error, i; for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; /* * Compute and program BR value. */ regval = sc->sc_banks[i].addr; switch (sc->sc_banks[i].width) { case 8: regval |= (1 << 11); break; case 16: regval |= (2 << 11); break; case 32: regval |= (3 << 11); break; default: error = EINVAL; goto fail; } regval |= (sc->sc_banks[i].decc << 9); regval |= (sc->sc_banks[i].wp << 8); regval |= (sc->sc_banks[i].msel << 5); regval |= (sc->sc_banks[i].atom << 2); regval |= 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_BR(i), regval); /* * Compute and program OR value. 
*/ regval = lbc_address_mask(size); switch (sc->sc_banks[i].msel) { case LBCRES_MSEL_GPCM: /* TODO Add flag support for option registers */ regval |= 0x0ff7; break; case LBCRES_MSEL_FCM: /* TODO Add flag support for options register */ regval |= 0x0796; break; case LBCRES_MSEL_UPMA: case LBCRES_MSEL_UPMB: case LBCRES_MSEL_UPMC: printf("UPM mode not supported yet!"); error = ENOSYS; goto fail; } bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_OR(i), regval); } return (0); fail: lbc_banks_unmap(sc); return (error); } static void fdt_lbc_fixup(phandle_t node, struct lbc_softc *sc, struct lbc_devinfo *di) { pcell_t width; int bank; if (OF_getprop(node, "bank-width", (void *)&width, sizeof(width)) <= 0) return; bank = di->di_bank; if (sc->sc_banks[bank].size == 0) return; /* Express width in bits. */ sc->sc_banks[bank].width = width * 8; } static int fdt_lbc_reg_decode(phandle_t node, struct lbc_softc *sc, struct lbc_devinfo *di) { rman_res_t start, end, count; pcell_t *reg, *regptr; pcell_t addr_cells, size_cells; int tuple_size, tuples; int i, j, rv, bank; if (fdt_addrsize_cells(OF_parent(node), &addr_cells, &size_cells) != 0) return (ENXIO); tuple_size = sizeof(pcell_t) * (addr_cells + size_cells); tuples = OF_getencprop_alloc_multi(node, "reg", tuple_size, (void **)®); debugf("addr_cells = %d, size_cells = %d\n", addr_cells, size_cells); debugf("tuples = %d, tuple size = %d\n", tuples, tuple_size); if (tuples <= 0) /* No 'reg' property in this node. */ return (0); regptr = reg; for (i = 0; i < tuples; i++) { bank = fdt_data_get((void *)reg, 1); di->di_bank = bank; reg += 1; /* Get address/size. */ start = count = 0; for (j = 0; j < addr_cells - 1; j++) { start <<= 32; start |= reg[j]; } for (j = 0; j < size_cells; j++) { count <<= 32; count |= reg[addr_cells + j - 1]; } reg += addr_cells - 1 + size_cells; /* Calculate address range relative to VA base. */ start = sc->sc_banks[bank].kva + start; end = start + count - 1; debugf("reg addr bank = %d, start = %jx, end = %jx, " "count = %jx\n", bank, start, end, count); /* Use bank (CS) cell as rid. 
*/ resource_list_add(&di->di_res, SYS_RES_MEMORY, bank, start, end, count); } rv = 0; OF_prop_free(regptr); return (rv); } static void lbc_intr(void *arg) { struct lbc_softc *sc = arg; uint32_t ltesr; ltesr = bus_space_read_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR); sc->sc_ltesr = ltesr; bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR, ltesr); wakeup(sc->sc_dev); } static int lbc_probe(device_t dev) { if (!(ofw_bus_is_compatible(dev, "fsl,lbc") || ofw_bus_is_compatible(dev, "fsl,elbc"))) return (ENXIO); device_set_desc(dev, "Freescale Local Bus Controller"); return (BUS_PROBE_DEFAULT); } static int lbc_attach(device_t dev) { struct lbc_softc *sc; struct lbc_devinfo *di; struct rman *rm; uintmax_t offset, size; vm_paddr_t start; device_t cdev; phandle_t node, child; pcell_t *ranges, *rangesptr; int tuple_size, tuples; int par_addr_cells; int bank, error, i, j; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_mrid = 0; sc->sc_mres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mrid, RF_ACTIVE); if (sc->sc_mres == NULL) return (ENXIO); sc->sc_bst = rman_get_bustag(sc->sc_mres); sc->sc_bsh = rman_get_bushandle(sc->sc_mres); for (bank = 0; bank < LBC_DEV_MAX; bank++) { bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_BR(bank), 0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_OR(bank), 0); } /* * Initialize configuration register: * - enable Local Bus * - set data buffer control signal function * - disable parity byte select * - set ECC parity type * - set bus monitor timing and timer prescale */ bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LBCR, 0); /* * Initialize clock ratio register: * - disable PLL bypass mode * - configure LCLK delay cycles for the assertion of LALE * - set system clock divider */ bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LCRR, 0x00030008); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTEDR, 0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR, ~0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTEIR, 0x64080001); sc->sc_irid = 0; sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_MISC | INTR_MPSAFE, NULL, lbc_intr, sc, &sc->sc_icookie); if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } sc->sc_ltesr = ~0; rangesptr = NULL; rm = &sc->sc_rman; rm->rm_type = RMAN_ARRAY; rm->rm_descr = "Local Bus Space"; error = rman_init(rm); if (error) goto fail; error = rman_manage_region(rm, rm->rm_start, rm->rm_end); if (error) { rman_fini(rm); goto fail; } /* * Process 'ranges' property. 
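 *
 * Each 'ranges' tuple is laid out as <bank> <child offset>
 * <parent address> <size>, with cell counts taken from this node's
 * #address-cells and the parent's #address-cells and #size-cells.
 * As an illustration (an assumed entry, not from a real device
 * tree), <0 0x0 0xfc000000 0x1000000> would describe a 16MB window
 * for chip select 0 at parent physical address 0xfc000000.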
*/ node = ofw_bus_get_node(dev); if ((fdt_addrsize_cells(node, &sc->sc_addr_cells, &sc->sc_size_cells)) != 0) { error = ENXIO; goto fail; } par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 2) { device_printf(dev, "unsupported parent #addr-cells\n"); error = ERANGE; goto fail; } tuple_size = sizeof(pcell_t) * (sc->sc_addr_cells + par_addr_cells + sc->sc_size_cells); tuples = OF_getencprop_alloc_multi(node, "ranges", tuple_size, (void **)&ranges); if (tuples < 0) { device_printf(dev, "could not retrieve 'ranges' property\n"); error = ENXIO; goto fail; } rangesptr = ranges; debugf("par addr_cells = %d, addr_cells = %d, size_cells = %d, " "tuple_size = %d, tuples = %d\n", par_addr_cells, sc->sc_addr_cells, sc->sc_size_cells, tuple_size, tuples); start = 0; size = 0; for (i = 0; i < tuples; i++) { /* The first cell is the bank (chip select) number. */ bank = fdt_data_get(ranges, 1); if (bank < 0 || bank > LBC_DEV_MAX) { device_printf(dev, "bank out of range: %d\n", bank); error = ERANGE; goto fail; } ranges += 1; /* * Remaining cells of the child address define offset into * this CS. */ offset = 0; for (j = 0; j < sc->sc_addr_cells - 1; j++) { offset <<= sizeof(pcell_t) * 8; offset |= *ranges; ranges++; } /* Parent bus start address of this bank. */ start = 0; for (j = 0; j < par_addr_cells; j++) { start <<= sizeof(pcell_t) * 8; start |= *ranges; ranges++; } size = fdt_data_get((void *)ranges, sc->sc_size_cells); ranges += sc->sc_size_cells; debugf("bank = %d, start = %jx, size = %jx\n", bank, (uintmax_t)start, size); sc->sc_banks[bank].addr = start + offset; sc->sc_banks[bank].size = size; /* * Attributes for the bank. * * XXX Note there are no DT bindings defined for them at the * moment, so we need to provide some defaults. */ sc->sc_banks[bank].width = 16; sc->sc_banks[bank].msel = LBCRES_MSEL_GPCM; sc->sc_banks[bank].decc = LBCRES_DECC_DISABLED; sc->sc_banks[bank].atom = LBCRES_ATOM_DISABLED; sc->sc_banks[bank].wp = 0; } /* * Initialize mem-mappings for the LBC banks (i.e. chip selects). */ error = lbc_banks_map(sc); if (error) goto fail; /* * Walk the localbus and add direct subordinates as our children. */ for (child = OF_child(node); child != 0; child = OF_peer(child)) { di = malloc(sizeof(*di), M_LBC, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_ofw, child) != 0) { free(di, M_LBC); device_printf(dev, "could not set up devinfo\n"); continue; } resource_list_init(&di->di_res); if (fdt_lbc_reg_decode(child, sc, di)) { device_printf(dev, "could not process 'reg' " "property\n"); ofw_bus_gen_destroy_devinfo(&di->di_ofw); free(di, M_LBC); continue; } fdt_lbc_fixup(child, sc, di); /* Add newbus device for this FDT node */ cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "could not add child: %s\n", di->di_ofw.obd_name); resource_list_free(&di->di_res); ofw_bus_gen_destroy_devinfo(&di->di_ofw); free(di, M_LBC); continue; } debugf("added child name='%s', node=%x\n", di->di_ofw.obd_name, child); device_set_ivars(cdev, di); } /* * Enable the LBC. 
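 *
 * lbc_banks_enable() below programs the BR/OR register pair for
 * every configured bank. Its return value is not checked here; on
 * failure it has already torn the LBC mappings down again via
 * lbc_banks_unmap().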
*/ lbc_banks_enable(sc); OF_prop_free(rangesptr); return (bus_generic_attach(dev)); fail: OF_prop_free(rangesptr); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mrid, sc->sc_mres); return (error); } static int lbc_shutdown(device_t dev) { /* TODO */ return(0); } static struct rman * lbc_get_rman(device_t bus, int type, u_int flags) { struct lbc_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: return (&sc->sc_rman); default: return (NULL); } } static struct resource * lbc_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct lbc_devinfo *di; struct resource_list_entry *rle; /* We only support default allocations. */ if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); if (type == SYS_RES_IRQ) return (bus_alloc_resource(bus, type, rid, start, end, count, flags)); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. */ if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; /* * XXX: We are supposed to return a value to the user, so this * doesn't seem right. */ rid = &di->di_bank; rle = resource_list_find(&di->di_res, type, *rid); if (rle == NULL) { device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; count = rle->count; end = start + count - 1; return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int lbc_print_child(device_t dev, device_t child) { struct lbc_devinfo *di; struct resource_list *rl; int rv; di = device_get_ivars(child); rl = &di->di_res; rv = 0; rv += bus_print_child_header(dev, child); rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); rv += bus_print_child_footer(dev, child); return (rv); } static int lbc_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(dev, child, r, start, end)); case SYS_RES_IRQ: return (bus_generic_adjust_resource(dev, child, r, start, end)); default: return (EINVAL); } } static int lbc_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: type = SYS_RES_MEMORY; /* FALLTHROUGH */ case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(dev, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_release_resource(dev, child, type, rid, res)); default: return (EINVAL); } } static int -lbc_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +lbc_activate_resource(device_t bus, device_t child, struct resource *r) { - switch (type) { - case SYS_RES_IOPORT: - type = SYS_RES_MEMORY; - /* FALLTHROUGH */ + switch (rman_get_type(r)) { case SYS_RES_MEMORY: - return (bus_generic_rman_activate_resource(bus, child, type, - rid, r)); + return (bus_generic_rman_activate_resource(bus, child, r)); case SYS_RES_IRQ: - return (bus_generic_activate_resource(bus, child, type, rid, r)); + return (bus_generic_activate_resource(bus, child, r)); default: return (EINVAL); } } static int -lbc_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +lbc_deactivate_resource(device_t bus, device_t child, struct resource *r) { - switch (type) { - case SYS_RES_IOPORT: - type 
= SYS_RES_MEMORY; - /* FALLTHROUGH */ + switch (rman_get_type(r)) { case SYS_RES_MEMORY: - return (bus_generic_rman_deactivate_resource(bus, child, type, - rid, r)); + return (bus_generic_rman_deactivate_resource(bus, child, r)); case SYS_RES_IRQ: - return (bus_generic_deactivate_resource(bus, child, type, rid, r)); + return (bus_generic_deactivate_resource(bus, child, r)); default: return (EINVAL); } } static int lbc_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_bustag = &bs_be_tag; map->r_bushandle = start; map->r_size = length; map->r_vaddr = NULL; return (0); } static int lbc_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } return (0); } static const struct ofw_bus_devinfo * lbc_get_devinfo(device_t bus, device_t child) { struct lbc_devinfo *di; di = device_get_ivars(child); return (&di->di_ofw); } void lbc_write_reg(device_t child, u_int off, uint32_t val) { device_t dev; struct lbc_softc *sc; dev = device_get_parent(child); if (off >= 0x1000) { device_printf(dev, "%s(%s): invalid offset %#x\n", __func__, device_get_nameunit(child), off); return; } sc = device_get_softc(dev); if (off == LBC85XX_LTESR && sc->sc_ltesr != ~0u) { sc->sc_ltesr ^= (val & sc->sc_ltesr); return; } if (off == LBC85XX_LTEATR && (val & 1) == 0) sc->sc_ltesr = ~0u; bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val); } uint32_t lbc_read_reg(device_t child, u_int off) { device_t dev; struct lbc_softc *sc; uint32_t val; dev = device_get_parent(child); if (off >= 0x1000) { device_printf(dev, "%s(%s): invalid offset %#x\n", __func__, device_get_nameunit(child), off); return (~0U); } sc = device_get_softc(dev); if (off == LBC85XX_LTESR && sc->sc_ltesr != ~0U) val = sc->sc_ltesr; else val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); return (val); } diff --git a/sys/powerpc/powermac/macgpio.c b/sys/powerpc/powermac/macgpio.c index d76b5abe79e1..37bd63392449 100644 --- a/sys/powerpc/powermac/macgpio.c +++ b/sys/powerpc/powermac/macgpio.c @@ -1,396 +1,394 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2008 by Nathan Whitehorn. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for MacIO GPIO controller */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Macgpio softc */ struct macgpio_softc { phandle_t sc_node; struct resource *sc_gpios; int sc_gpios_rid; uint32_t sc_saved_gpio_levels[2]; uint32_t sc_saved_gpios[GPIO_COUNT]; uint32_t sc_saved_extint_gpios[GPIO_EXTINT_COUNT]; }; static MALLOC_DEFINE(M_MACGPIO, "macgpio", "macgpio device information"); static int macgpio_probe(device_t); static int macgpio_attach(device_t); static int macgpio_print_child(device_t dev, device_t child); static void macgpio_probe_nomatch(device_t, device_t); static struct resource *macgpio_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int macgpio_activate_resource(device_t, device_t, int, int, +static int macgpio_activate_resource(device_t, device_t, struct resource *); -static int macgpio_deactivate_resource(device_t, device_t, int, int, +static int macgpio_deactivate_resource(device_t, device_t, struct resource *); static ofw_bus_get_devinfo_t macgpio_get_devinfo; static int macgpio_suspend(device_t dev); static int macgpio_resume(device_t dev); /* * Bus interface definition */ static device_method_t macgpio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, macgpio_probe), DEVMETHOD(device_attach, macgpio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, macgpio_suspend), DEVMETHOD(device_resume, macgpio_resume), /* Bus interface */ DEVMETHOD(bus_print_child, macgpio_print_child), DEVMETHOD(bus_probe_nomatch, macgpio_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, macgpio_alloc_resource), DEVMETHOD(bus_activate_resource, macgpio_activate_resource), DEVMETHOD(bus_deactivate_resource, macgpio_deactivate_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, macgpio_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t macgpio_pci_driver = { "macgpio", macgpio_methods, sizeof(struct macgpio_softc) }; EARLY_DRIVER_MODULE(macgpio, macio, macgpio_pci_driver, 0, 0, BUS_PASS_BUS); struct macgpio_devinfo { struct ofw_bus_devinfo mdi_obdinfo; struct resource_list mdi_resources; int gpio_num; }; static int macgpio_probe(device_t dev) { 
const char *name; name = ofw_bus_get_name(dev); if (name && strcmp(name, "gpio") == 0) { device_set_desc(dev, "MacIO GPIO Controller"); return (0); } return (ENXIO); } /* * Scan Open Firmware child nodes, and attach these as children * of the macgpio bus */ static int macgpio_attach(device_t dev) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; phandle_t root, child, iparent; device_t cdev; uint32_t irq[2]; sc = device_get_softc(dev); root = sc->sc_node = ofw_bus_get_node(dev); sc->sc_gpios = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_gpios_rid, RF_ACTIVE); /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_MACGPIO, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_MACGPIO); continue; } if (OF_getencprop(child, "reg", &dinfo->gpio_num, sizeof(dinfo->gpio_num)) != sizeof(dinfo->gpio_num)) { /* * Some early GPIO controllers don't provide GPIO * numbers for GPIOs designed only to provide * interrupt resources. We should still allow these * to attach, but with caution. */ dinfo->gpio_num = -1; } resource_list_init(&dinfo->mdi_resources); if (OF_getencprop(child, "interrupts", irq, sizeof(irq)) == sizeof(irq)) { OF_searchencprop(child, "interrupt-parent", &iparent, sizeof(iparent)); resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, 0, MAP_IRQ(iparent, irq[0]), MAP_IRQ(iparent, irq[0]), 1); } /* Fix messed-up offsets */ if (dinfo->gpio_num > 0x50) dinfo->gpio_num -= 0x50; cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACGPIO); continue; } device_set_ivars(cdev, dinfo); } return (bus_generic_attach(dev)); } static int macgpio_print_child(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; int retval = 0; dinfo = device_get_ivars(child); retval += bus_print_child_header(dev, child); if (dinfo->gpio_num >= GPIO_BASE) printf(" gpio %d", dinfo->gpio_num - GPIO_BASE); else if (dinfo->gpio_num >= GPIO_EXTINT_BASE) printf(" extint-gpio %d", dinfo->gpio_num - GPIO_EXTINT_BASE); else if (dinfo->gpio_num >= 0) printf(" addr 0x%02x", dinfo->gpio_num); /* should not happen */ resource_list_print_type(&dinfo->mdi_resources, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void macgpio_probe_nomatch(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); if (dinfo->gpio_num >= 0) printf(" gpio %d",dinfo->gpio_num); resource_list_print_type(&dinfo->mdi_resources, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct resource * macgpio_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct macgpio_devinfo *dinfo; dinfo = device_get_ivars(child); if (type != SYS_RES_IRQ) return (NULL); return (resource_list_alloc(&dinfo->mdi_resources, bus, child, type, rid, start, end, count, flags)); } static int -macgpio_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +macgpio_activate_resource(device_t bus, device_t child, struct resource *res) { struct macgpio_softc *sc; struct macgpio_devinfo 
*dinfo; u_char val; sc = device_get_softc(bus); dinfo = device_get_ivars(child); - if (type != SYS_RES_IRQ) + if (rman_get_type(res) != SYS_RES_IRQ) return ENXIO; if (dinfo->gpio_num >= 0) { val = bus_read_1(sc->sc_gpios,dinfo->gpio_num); val |= 0x80; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } - return (bus_activate_resource(bus, type, rid, res)); + return (bus_generic_activate_resource(bus, child, res)); } static int -macgpio_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +macgpio_deactivate_resource(device_t bus, device_t child, struct resource *res) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; u_char val; sc = device_get_softc(bus); dinfo = device_get_ivars(child); - if (type != SYS_RES_IRQ) + if (rman_get_type(res) != SYS_RES_IRQ) return ENXIO; if (dinfo->gpio_num >= 0) { val = bus_read_1(sc->sc_gpios,dinfo->gpio_num); val &= ~0x80; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } - return (bus_deactivate_resource(bus, type, rid, res)); + return (bus_generic_deactivate_resource(bus, child, res)); } uint8_t macgpio_read(device_t dev) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; sc = device_get_softc(device_get_parent(dev)); dinfo = device_get_ivars(dev); if (dinfo->gpio_num < 0) return (0); return (bus_read_1(sc->sc_gpios,dinfo->gpio_num)); } void macgpio_write(device_t dev, uint8_t val) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; sc = device_get_softc(device_get_parent(dev)); dinfo = device_get_ivars(dev); if (dinfo->gpio_num < 0) return; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } static const struct ofw_bus_devinfo * macgpio_get_devinfo(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_obdinfo); } static int macgpio_suspend(device_t dev) { struct macgpio_softc *sc; int i; sc = device_get_softc(dev); sc->sc_saved_gpio_levels[0] = bus_read_4(sc->sc_gpios, GPIO_LEVELS_0); sc->sc_saved_gpio_levels[1] = bus_read_4(sc->sc_gpios, GPIO_LEVELS_1); for (i = 0; i < GPIO_COUNT; i++) sc->sc_saved_gpios[i] = bus_read_1(sc->sc_gpios, GPIO_BASE + i); for (i = 0; i < GPIO_EXTINT_COUNT; i++) sc->sc_saved_extint_gpios[i] = bus_read_1(sc->sc_gpios, GPIO_EXTINT_BASE + i); return (0); } static int macgpio_resume(device_t dev) { struct macgpio_softc *sc; int i; sc = device_get_softc(dev); bus_write_4(sc->sc_gpios, GPIO_LEVELS_0, sc->sc_saved_gpio_levels[0]); bus_write_4(sc->sc_gpios, GPIO_LEVELS_1, sc->sc_saved_gpio_levels[1]); for (i = 0; i < GPIO_COUNT; i++) bus_write_1(sc->sc_gpios, GPIO_BASE + i, sc->sc_saved_gpios[i]); for (i = 0; i < GPIO_EXTINT_COUNT; i++) bus_write_1(sc->sc_gpios, GPIO_EXTINT_BASE + i, sc->sc_saved_extint_gpios[i]); return (0); } diff --git a/sys/powerpc/powermac/macio.c b/sys/powerpc/powermac/macio.c index cb4471bbcca1..eb6980005904 100644 --- a/sys/powerpc/powermac/macio.c +++ b/sys/powerpc/powermac/macio.c @@ -1,799 +1,791 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for KeyLargo/Pangea, the MacPPC south bridge ASIC. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Macio softc */ struct macio_softc { phandle_t sc_node; vm_offset_t sc_base; vm_offset_t sc_size; struct rman sc_mem_rman; /* FCR registers */ int sc_memrid; struct resource *sc_memr; /* GPIO offsets */ int sc_timebase; }; static MALLOC_DEFINE(M_MACIO, "macio", "macio device information"); static int macio_probe(device_t); static int macio_attach(device_t); static int macio_print_child(device_t dev, device_t child); static void macio_probe_nomatch(device_t, device_t); static struct rman *macio_get_rman(device_t, int, u_int); static struct resource *macio_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int macio_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); -static int macio_activate_resource(device_t, device_t, int, int, - struct resource *); -static int macio_deactivate_resource(device_t, device_t, int, int, - struct resource *); +static int macio_activate_resource(device_t, device_t, struct resource *); +static int macio_deactivate_resource(device_t, device_t, struct resource *); static int macio_release_resource(device_t, device_t, int, int, struct resource *); static int macio_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int macio_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static struct resource_list *macio_get_resource_list (device_t, device_t); static ofw_bus_get_devinfo_t macio_get_devinfo; #if !defined(__powerpc64__) && defined(SMP) static void macio_freeze_timebase(device_t, bool); #endif /* * Bus interface definition */ static device_method_t macio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, macio_probe), DEVMETHOD(device_attach, macio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, macio_print_child), DEVMETHOD(bus_probe_nomatch, macio_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, macio_get_rman), DEVMETHOD(bus_alloc_resource, macio_alloc_resource), DEVMETHOD(bus_adjust_resource, macio_adjust_resource), DEVMETHOD(bus_release_resource, macio_release_resource), 
DEVMETHOD(bus_activate_resource, macio_activate_resource), DEVMETHOD(bus_deactivate_resource, macio_deactivate_resource), DEVMETHOD(bus_map_resource, macio_map_resource), DEVMETHOD(bus_unmap_resource, macio_unmap_resource), DEVMETHOD(bus_get_resource_list, macio_get_resource_list), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, macio_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t macio_pci_driver = { "macio", macio_methods, sizeof(struct macio_softc) }; EARLY_DRIVER_MODULE(macio, pci, macio_pci_driver, 0, 0, BUS_PASS_BUS); /* * PCI ID search table */ static struct macio_pci_dev { u_int32_t mpd_devid; char *mpd_desc; } macio_pci_devlist[] = { { 0x0017106b, "Paddington I/O Controller" }, { 0x0022106b, "KeyLargo I/O Controller" }, { 0x0025106b, "Pangea I/O Controller" }, { 0x003e106b, "Intrepid I/O Controller" }, { 0x0041106b, "K2 KeyLargo I/O Controller" }, { 0x004f106b, "Shasta I/O Controller" }, { 0, NULL } }; /* * Devices to exclude from the probe * XXX some of these may be required in the future... */ #define MACIO_QUIRK_IGNORE 0x00000001 #define MACIO_QUIRK_CHILD_HAS_INTR 0x00000002 #define MACIO_QUIRK_USE_CHILD_REG 0x00000004 struct macio_quirk_entry { const char *mq_name; int mq_quirks; }; static struct macio_quirk_entry macio_quirks[] = { { "escc-legacy", MACIO_QUIRK_IGNORE }, { "timer", MACIO_QUIRK_IGNORE }, { "escc", MACIO_QUIRK_CHILD_HAS_INTR }, { "i2s", MACIO_QUIRK_CHILD_HAS_INTR | MACIO_QUIRK_USE_CHILD_REG }, { NULL, 0 } }; static int macio_get_quirks(const char *name) { struct macio_quirk_entry *mqe; for (mqe = macio_quirks; mqe->mq_name != NULL; mqe++) if (strcmp(name, mqe->mq_name) == 0) return (mqe->mq_quirks); return (0); } /* * Add an interrupt to the dev's resource list if present */ static void macio_add_intr(phandle_t devnode, struct macio_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->mdi_ninterrupts >= 6) { printf("macio: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc_multi(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc_multi(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_getprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, irq, irq, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = irq; dinfo->mdi_ninterrupts++; } } static void macio_add_reg(phandle_t devnode, struct macio_devinfo *dinfo) { struct macio_reg *reg, *regp; phandle_t child; char buf[8]; int i, layout_id = 0, nreg, res; nreg = OF_getprop_alloc_multi(devnode, "reg", sizeof(*reg), (void **)&reg); if (nreg == -1) return; /* * Some G5s have broken properties in the i2s-a area. If so we try * to fix it. Right now we know of two different cases, one for * sound layout-id 36 and the other one for sound layout-id 76. * What is missing is the base address for the memory addresses.
* We take them from the parent node (i2s) and use the size * information from the child. */ if (reg[0].mr_base == 0) { child = OF_child(devnode); while (child != 0) { res = OF_getprop(child, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "sound") == 0) break; child = OF_peer(child); } res = OF_getprop(child, "layout-id", &layout_id, sizeof(layout_id)); if (res > 0 && (layout_id == 36 || layout_id == 76)) { res = OF_getprop_alloc_multi(OF_parent(devnode), "reg", sizeof(*regp), (void **)&regp); reg[0] = regp[0]; reg[1].mr_base = regp[1].mr_base; reg[2].mr_base = regp[1].mr_base + reg[1].mr_size; } } for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->mdi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } } /* * PCI probe */ static int macio_probe(device_t dev) { int i; u_int32_t devid; devid = pci_get_devid(dev); for (i = 0; macio_pci_devlist[i].mpd_desc != NULL; i++) { if (devid == macio_pci_devlist[i].mpd_devid) { device_set_desc(dev, macio_pci_devlist[i].mpd_desc); return (0); } } return (ENXIO); } /* * PCI attach: scan Open Firmware child nodes, and attach these as children * of the macio bus */ static int macio_attach(device_t dev) { struct macio_softc *sc; struct macio_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t subchild; device_t cdev; u_int reg[3]; char compat[32]; int error, quirks; sc = device_get_softc(dev); root = sc->sc_node = ofw_bus_get_node(dev); /* * Locate the device node and its base address */ if (OF_getprop(root, "assigned-addresses", reg, sizeof(reg)) < (ssize_t)sizeof(reg)) { return (ENXIO); } /* Used later to see if we have to enable the I2S part. */ OF_getprop(root, "compatible", compat, sizeof(compat)); sc->sc_base = reg[2]; sc->sc_size = MACIO_REG_SIZE; sc->sc_memrid = PCIR_BAR(0); sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_memrid, RF_ACTIVE); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "MacIO Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); if (error) { device_printf(dev, "rman_manage_region() failed.
error = %d\n", error); return (error); } /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_MACIO, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_MACIO); continue; } quirks = macio_get_quirks(dinfo->mdi_obdinfo.obd_name); if ((quirks & MACIO_QUIRK_IGNORE) != 0) { ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } resource_list_init(&dinfo->mdi_resources); dinfo->mdi_ninterrupts = 0; macio_add_intr(child, dinfo); if ((quirks & MACIO_QUIRK_USE_CHILD_REG) != 0) macio_add_reg(OF_child(child), dinfo); else macio_add_reg(child, dinfo); if ((quirks & MACIO_QUIRK_CHILD_HAS_INTR) != 0) for (subchild = OF_child(child); subchild != 0; subchild = OF_peer(subchild)) macio_add_intr(subchild, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); resource_list_free(&dinfo->mdi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } device_set_ivars(cdev, dinfo); /* Set FCRs to enable some devices */ if (sc->sc_memr == NULL) continue; if (strcmp(ofw_bus_get_name(cdev), "bmac") == 0 || (ofw_bus_get_compat(cdev) != NULL && strcmp(ofw_bus_get_compat(cdev), "bmac+") == 0)) { uint32_t fcr; fcr = bus_read_4(sc->sc_memr, HEATHROW_FCR); fcr |= FCR_ENET_ENABLE & ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr |= FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr &= ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); } /* * Make sure the I2S0 and the I2S0_CLK are enabled. * On certain G5's they are not. */ if ((strcmp(ofw_bus_get_name(cdev), "i2s") == 0) && (strcmp(compat, "K2-Keylargo") == 0)) { uint32_t fcr1; fcr1 = bus_read_4(sc->sc_memr, KEYLARGO_FCR1); fcr1 |= FCR1_I2S0_CLK_ENABLE | FCR1_I2S0_ENABLE; bus_write_4(sc->sc_memr, KEYLARGO_FCR1, fcr1); } } #if !defined(__powerpc64__) && defined(SMP) /* * Detect an SMP G4 machine. * * On SMP G4, timebase freeze is via a GPIO on macio. * * When we are on an SMP G4, we need to install a handler to * perform timebase freeze/unfreeze on behalf of the platform. 
*/ if ((child = OF_finddevice("/cpus/PowerPC,G4@0")) != -1 && OF_peer(child) != -1) { if (OF_getprop(child, "timebase-enable", &sc->sc_timebase, sizeof(sc->sc_timebase)) <= 0) sc->sc_timebase = KEYLARGO_GPIO_BASE + 0x09; powermac_register_timebase(dev, macio_freeze_timebase); device_printf(dev, "GPIO timebase control at 0x%x\n", sc->sc_timebase); } #endif return (bus_generic_attach(dev)); } static int macio_print_child(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void macio_probe_nomatch(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct rman * macio_get_rman(device_t bus, int type, u_int flags) { struct macio_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * macio_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t adjstart, adjend, adjcount; struct macio_devinfo *dinfo; struct resource_list_entry *rle; dinfo = device_get_ivars(child); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; return (bus_generic_rman_alloc_resource(bus, child, type, rid, adjstart, adjend, adjcount, flags)); case SYS_RES_IRQ: /* Check for passthrough from subattachments like macgpio */ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->mdi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, start, start, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = start; dinfo->mdi_ninterrupts++; } return (resource_list_alloc(&dinfo->mdi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } } static int macio_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(bus, child, r, 
start, end)); case SYS_RES_IRQ: return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int macio_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_rl_release_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int -macio_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +macio_activate_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_activate_resource(bus, child, type, - rid, res)); + return (bus_generic_rman_activate_resource(bus, child, res)); case SYS_RES_IRQ: - return (bus_generic_activate_resource(bus, child, type, rid, - res)); + return (bus_generic_activate_resource(bus, child, res)); default: return (EINVAL); } } static int -macio_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +macio_deactivate_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_deactivate_resource(bus, child, type, - rid, res)); + return (bus_generic_rman_deactivate_resource(bus, child, res)); case SYS_RES_IRQ: - return (bus_generic_deactivate_resource(bus, child, type, rid, - res)); + return (bus_generic_deactivate_resource(bus, child, res)); default: return (EINVAL); } } static int macio_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct macio_softc *sc; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); if (bootverbose) printf("nexus mapdev: start %jx, len %jd\n", (uintmax_t)start, (uintmax_t)length); sc = device_get_softc(bus); map->r_vaddr = pmap_mapdev_attr((vm_paddr_t)start + sc->sc_base, length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_le_tag; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int macio_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { /* * If this is a memory resource, unmap it. 
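 * This undoes the pmap_mapdev_attr() mapping established in
 * macio_map_resource() above; IRQ resources carry no mapping, so
 * any other type is rejected with EINVAL.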
*/ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); break; default: return (EINVAL); } return (0); } static struct resource_list * macio_get_resource_list (device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_resources); } static const struct ofw_bus_devinfo * macio_get_devinfo(device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_obdinfo); } int macio_enable_wireless(device_t dev, bool enable) { struct macio_softc *sc = device_get_softc(dev); uint32_t x; if (enable) { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* Enable card slot. */ bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 5); DELAY(1000); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 4); DELAY(1000); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 4); */ bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0b, 0); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0a, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0e, 0x28); bus_write_4(sc->sc_memr, 0x1c000, 0); /* Initialize the card. */ bus_write_4(sc->sc_memr, 0x1a3e0, 0x41); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); } else { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 0); */ } return (0); } #if !defined(__powerpc64__) && defined(SMP) static void macio_freeze_timebase(device_t dev, bool freeze) { struct macio_softc *sc = device_get_softc(dev); if (freeze) { bus_write_1(sc->sc_memr, sc->sc_timebase, 4); } else { bus_write_1(sc->sc_memr, sc->sc_timebase, 0); } bus_read_1(sc->sc_memr, sc->sc_timebase); } #endif diff --git a/sys/powerpc/powermac/uninorth.c b/sys/powerpc/powermac/uninorth.c index c7842311d730..96caf4f34530 100644 --- a/sys/powerpc/powermac/uninorth.c +++ b/sys/powerpc/powermac/uninorth.c @@ -1,727 +1,722 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2002 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Driver for the Uninorth chip itself. */ static MALLOC_DEFINE(M_UNIN, "unin", "unin device information"); /* * Device interface. */ static int unin_chip_probe(device_t); static int unin_chip_attach(device_t); /* * Bus interface. */ static int unin_chip_print_child(device_t dev, device_t child); static void unin_chip_probe_nomatch(device_t, device_t); static struct rman *unin_chip_get_rman(device_t, int, u_int); static struct resource *unin_chip_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int unin_chip_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); -static int unin_chip_activate_resource(device_t, device_t, int, int, +static int unin_chip_activate_resource(device_t, device_t, struct resource *); -static int unin_chip_deactivate_resource(device_t, device_t, int, int, +static int unin_chip_deactivate_resource(device_t, device_t, struct resource *); static int unin_chip_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int unin_chip_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static int unin_chip_release_resource(device_t, device_t, int, int, struct resource *); static struct resource_list *unin_chip_get_resource_list (device_t, device_t); /* * OFW Bus interface */ static ofw_bus_get_devinfo_t unin_chip_get_devinfo; /* * Local routines */ static void unin_enable_gmac(device_t dev); static void unin_enable_mpic(device_t dev); /* * Driver methods. 
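 *
 * Note that the activate and deactivate methods below follow the
 * slimmed-down bus interface used throughout this change: the
 * resource type is recovered from the resource itself with
 * rman_get_type() rather than being passed as a separate argument.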
*/ static device_method_t unin_chip_methods[] = { /* Device interface */ DEVMETHOD(device_probe, unin_chip_probe), DEVMETHOD(device_attach, unin_chip_attach), /* Bus interface */ DEVMETHOD(bus_print_child, unin_chip_print_child), DEVMETHOD(bus_probe_nomatch, unin_chip_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, unin_chip_get_rman), DEVMETHOD(bus_alloc_resource, unin_chip_alloc_resource), DEVMETHOD(bus_adjust_resource, unin_chip_adjust_resource), DEVMETHOD(bus_release_resource, unin_chip_release_resource), DEVMETHOD(bus_activate_resource, unin_chip_activate_resource), DEVMETHOD(bus_deactivate_resource, unin_chip_deactivate_resource), DEVMETHOD(bus_map_resource, unin_chip_map_resource), DEVMETHOD(bus_unmap_resource, unin_chip_unmap_resource), DEVMETHOD(bus_get_resource_list, unin_chip_get_resource_list), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, unin_chip_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t unin_chip_driver = { "unin", unin_chip_methods, sizeof(struct unin_chip_softc) }; /* * Assume there is only one unin chip in a PowerMac, so that pmu.c functions can * suspend the chip after the whole rest of the device tree is suspended, not * earlier. */ static device_t unin_chip; EARLY_DRIVER_MODULE(unin, ofwbus, unin_chip_driver, 0, 0, BUS_PASS_BUS); /* * Add an interrupt to the dev's resource list if present */ static void unin_chip_add_intr(phandle_t devnode, struct unin_chip_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->udi_ninterrupts >= 6) { printf("unin: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc_multi(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc_multi(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_searchprop(iparent, "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, irq, irq, 1); if (icells > 1) { powerpc_config_intr(irq, (intr[i+1] & 1) ? 
INTR_TRIGGER_LEVEL : INTR_TRIGGER_EDGE, INTR_POLARITY_LOW); } dinfo->udi_interrupts[dinfo->udi_ninterrupts] = irq; dinfo->udi_ninterrupts++; } } static void unin_chip_add_reg(phandle_t devnode, struct unin_chip_devinfo *dinfo) { struct unin_chip_reg *reg; int i, nreg; nreg = OF_getprop_alloc_multi(devnode, "reg", sizeof(*reg), (void **)&reg); if (nreg == -1) return; for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->udi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } } static void unin_update_reg(device_t dev, uint32_t regoff, uint32_t set, uint32_t clr) { volatile u_int *reg; struct unin_chip_softc *sc; u_int32_t tmpl; sc = device_get_softc(dev); reg = (void *)(sc->sc_addr + regoff); tmpl = inl(reg); tmpl &= ~clr; tmpl |= set; outl(reg, tmpl); } static void unin_enable_gmac(device_t dev) { unin_update_reg(dev, UNIN_CLOCKCNTL, UNIN_CLOCKCNTL_GMAC, 0); } static void unin_enable_mpic(device_t dev) { unin_update_reg(dev, UNIN_TOGGLE_REG, UNIN_MPIC_RESET | UNIN_MPIC_OUTPUT_ENABLE, 0); } static int unin_chip_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL) return (ENXIO); if (strcmp(name, "uni-n") != 0 && strcmp(name, "u3") != 0 && strcmp(name, "u4") != 0) return (ENXIO); device_set_desc(dev, "Apple UniNorth System Controller"); return (0); } static int unin_chip_attach(device_t dev) { struct unin_chip_softc *sc; struct unin_chip_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t iparent; device_t cdev; cell_t acells, scells; char compat[32]; char name[32]; u_int irq, reg[3]; int error, i = 0; sc = device_get_softc(dev); root = ofw_bus_get_node(dev); if (OF_getprop(root, "reg", reg, sizeof(reg)) < 8) return (ENXIO); acells = scells = 1; OF_getprop(OF_parent(root), "#address-cells", &acells, sizeof(acells)); OF_getprop(OF_parent(root), "#size-cells", &scells, sizeof(scells)); i = 0; sc->sc_physaddr = reg[i++]; if (acells == 2) { sc->sc_physaddr <<= 32; sc->sc_physaddr |= reg[i++]; } sc->sc_size = reg[i++]; if (scells == 2) { sc->sc_size <<= 32; sc->sc_size |= reg[i++]; } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "UniNorth Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, sc->sc_physaddr, sc->sc_physaddr + sc->sc_size - 1); if (error) { device_printf(dev, "rman_manage_region() failed. error = %d\n", error); return (error); } if (unin_chip == NULL) unin_chip = dev; /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_UNIN, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->udi_obdinfo, child) != 0) { free(dinfo, M_UNIN); continue; } resource_list_init(&dinfo->udi_resources); dinfo->udi_ninterrupts = 0; unin_chip_add_intr(child, dinfo); /* * Some Apple machines have a bug in OF: the U3 I2C node is * missing its interrupt entries, i.e. it carries neither an * interrupt count nor an interrupt parent handle. * We define an interrupt and hardwire it to the /u3/mpic * handle.
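 *
 * The fallback below only fires for child nodes named "i2c" or
 * "i2c-bus" that report no interrupts at all; interrupt 0 on the
 * MPIC is then registered on their behalf.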
*/ if (OF_getprop(child, "name", name, sizeof(name)) <= 0) device_printf(dev, "device has no name!\n"); if (dinfo->udi_ninterrupts == 0 && (strcmp(name, "i2c-bus") == 0 || strcmp(name, "i2c") == 0)) { if (OF_getprop(child, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) { iparent = OF_finddevice("/u3/mpic"); device_printf(dev, "Set /u3/mpic as iparent!\n"); } /* Add an interrupt number 0 to the parent. */ irq = MAP_IRQ(iparent, 0); resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, irq, irq, 1); dinfo->udi_interrupts[dinfo->udi_ninterrupts] = irq; dinfo->udi_ninterrupts++; } unin_chip_add_reg(child, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->udi_obdinfo.obd_name); resource_list_free(&dinfo->udi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->udi_obdinfo); free(dinfo, M_UNIN); continue; } device_set_ivars(cdev, dinfo); } /* * Only map the first page, since that is where the registers * of interest lie. */ sc->sc_addr = (vm_offset_t)pmap_mapdev(sc->sc_physaddr, PAGE_SIZE); sc->sc_version = *(u_int *)sc->sc_addr; device_printf(dev, "Version %d\n", sc->sc_version); /* * Enable the GMAC Ethernet cell and the integrated OpenPIC * if Open Firmware says they are used. */ for (child = OF_child(root); child; child = OF_peer(child)) { memset(compat, 0, sizeof(compat)); OF_getprop(child, "compatible", compat, sizeof(compat)); if (strcmp(compat, "gmac") == 0) unin_enable_gmac(dev); if (strcmp(compat, "chrp,open-pic") == 0) unin_enable_mpic(dev); } /* * GMAC lives under the PCI bus, so just check if enet is gmac. */ child = OF_finddevice("enet"); memset(compat, 0, sizeof(compat)); OF_getprop(child, "compatible", compat, sizeof(compat)); if (strcmp(compat, "gmac") == 0) unin_enable_gmac(dev); return (bus_generic_attach(dev)); } static int unin_chip_print_child(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->udi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void unin_chip_probe_nomatch(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->udi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct rman * unin_chip_get_rman(device_t bus, int type, u_int flags) { struct unin_chip_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * unin_chip_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t adjstart, adjend, adjcount; struct unin_chip_devinfo *dinfo; struct resource_list_entry *rle; dinfo = device_get_ivars(child); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->udi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for 
%s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } rle->end = rle->end - 1; /* Hack? */ if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; return (bus_generic_rman_alloc_resource(bus, child, SYS_RES_MEMORY, rid, adjstart, adjend, adjcount, flags)); case SYS_RES_IRQ: /* Check for passthrough from subattachments. */ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->udi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->udi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, start, start, 1); dinfo->udi_interrupts[dinfo->udi_ninterrupts] = start; dinfo->udi_ninterrupts++; } return (resource_list_alloc(&dinfo->udi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } } static int unin_chip_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(bus, child, r, start, end)); case SYS_RES_IRQ: return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int unin_chip_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_rl_release_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int -unin_chip_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +unin_chip_activate_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_activate_resource(bus, child, type, - rid, res)); + return (bus_generic_rman_activate_resource(bus, child, res)); case SYS_RES_IRQ: - return (bus_generic_activate_resource(bus, child, type, rid, - res)); + return (bus_generic_activate_resource(bus, child, res)); default: return (EINVAL); } } static int -unin_chip_deactivate_resource(device_t bus, device_t child, int type, int rid, +unin_chip_deactivate_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_deactivate_resource(bus, child, type, - rid, res)); + return (bus_generic_rman_deactivate_resource(bus, child, res)); case SYS_RES_IRQ: - return (bus_generic_deactivate_resource(bus, child, type, rid, - res)); + return (bus_generic_deactivate_resource(bus, child, res)); default: return (EINVAL); } } static int unin_chip_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. 
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); if (bootverbose) printf("nexus mapdev: start %jx, len %jd\n", (uintmax_t)start, (uintmax_t)length); map->r_vaddr = pmap_mapdev_attr(start, length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_be_tag; map->r_size = length; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int unin_chip_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { /* * If this is a memory resource, unmap it. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); break; default: return (EINVAL); } return (0); } static struct resource_list * unin_chip_get_resource_list (device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->udi_resources); } static const struct ofw_bus_devinfo * unin_chip_get_devinfo(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->udi_obdinfo); } int unin_chip_wake(device_t dev) { if (dev == NULL) dev = unin_chip; unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_NORMAL, UNIN_PWR_MASK); DELAY(10); unin_update_reg(dev, UNIN_HWINIT_STATE, UNIN_RUNNING, 0); DELAY(100); return (0); } int unin_chip_sleep(device_t dev, int idle) { if (dev == NULL) dev = unin_chip; unin_update_reg(dev, UNIN_HWINIT_STATE, UNIN_SLEEPING, 0); DELAY(10); if (idle) unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_IDLE2, UNIN_PWR_MASK); else unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_SLEEP, UNIN_PWR_MASK); DELAY(10); return (0); } diff --git a/sys/powerpc/psim/iobus.c b/sys/powerpc/psim/iobus.c index 8f348f0f0614..cea6fc5fb15e 100644 --- a/sys/powerpc/psim/iobus.c +++ b/sys/powerpc/psim/iobus.c @@ -1,463 +1,457 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * PSIM 'iobus' local bus. Should be set up in the device tree like: * * /iobus@0x80000000/name psim-iobus * * Code borrowed from various nexus.c and uninorth.c :-) */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct iobus_softc { phandle_t sc_node; vm_offset_t sc_addr; vm_offset_t sc_size; struct rman sc_mem_rman; }; static MALLOC_DEFINE(M_IOBUS, "iobus", "iobus device information"); static int iobus_probe(device_t); static int iobus_attach(device_t); static int iobus_print_child(device_t dev, device_t child); static void iobus_probe_nomatch(device_t, device_t); static int iobus_read_ivar(device_t, device_t, int, uintptr_t *); static int iobus_write_ivar(device_t, device_t, int, uintptr_t); static struct rman *iobus_get_rman(device_t, int, u_int); static struct resource *iobus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int iobus_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); -static int iobus_activate_resource(device_t, device_t, int, int, - struct resource *); -static int iobus_deactivate_resource(device_t, device_t, int, int, - struct resource *); +static int iobus_activate_resource(device_t, device_t, struct resource *); +static int iobus_deactivate_resource(device_t, device_t, struct resource *); static int iobus_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int iobus_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static int iobus_release_resource(device_t, device_t, int, int, struct resource *); /* * Bus interface definition */ static device_method_t iobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iobus_probe), DEVMETHOD(device_attach, iobus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, iobus_print_child), DEVMETHOD(bus_probe_nomatch, iobus_probe_nomatch), DEVMETHOD(bus_read_ivar, iobus_read_ivar), DEVMETHOD(bus_write_ivar, iobus_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, iobus_get_rman), DEVMETHOD(bus_alloc_resource, iobus_alloc_resource), DEVMETHOD(bus_adjust_resource, iobus_adjust_resource), DEVMETHOD(bus_release_resource, iobus_release_resource), DEVMETHOD(bus_activate_resource, iobus_activate_resource), DEVMETHOD(bus_deactivate_resource, iobus_deactivate_resource), DEVMETHOD(bus_map_resource, iobus_map_resource), DEVMETHOD(bus_unmap_resource, iobus_unmap_resource), { 0, 0 } }; static driver_t iobus_driver = { "iobus", iobus_methods, sizeof(struct iobus_softc) }; DRIVER_MODULE(iobus, ofwbus, iobus_driver, 0, 0); static int iobus_probe(device_t dev) { const char *type = 
ofw_bus_get_name(dev); if (strcmp(type, "psim-iobus") != 0) return (ENXIO); device_set_desc(dev, "PSIM local bus"); return (0); } /* * Add interrupt/addr range to the dev's resource list if present */ static void iobus_add_intr(phandle_t devnode, struct iobus_devinfo *dinfo) { u_int intr = -1; if (OF_getprop(devnode, "interrupt", &intr, sizeof(intr)) != -1) { resource_list_add(&dinfo->id_resources, SYS_RES_IRQ, 0, intr, intr, 1); } dinfo->id_interrupt = intr; } static void iobus_add_reg(phandle_t devnode, struct iobus_devinfo *dinfo, vm_offset_t iobus_off) { u_int size; int i; size = OF_getprop(devnode, "reg", dinfo->id_reg,sizeof(dinfo->id_reg)); if (size != -1) { dinfo->id_nregs = size / (sizeof(dinfo->id_reg[0])); for (i = 0; i < dinfo->id_nregs; i+= 3) { /* * Scale the absolute addresses back to iobus * relative offsets. This is to better simulate * macio */ dinfo->id_reg[i+1] -= iobus_off; resource_list_add(&dinfo->id_resources, SYS_RES_MEMORY, 0, dinfo->id_reg[i+1], dinfo->id_reg[i+1] + dinfo->id_reg[i+2], dinfo->id_reg[i+2]); } } } static int iobus_attach(device_t dev) { struct iobus_softc *sc; struct iobus_devinfo *dinfo; phandle_t root; phandle_t child; device_t cdev; char *name; u_int reg[2]; int size; sc = device_get_softc(dev); sc->sc_node = ofw_bus_get_node(dev); /* * Find the base addr/size of the iobus, and initialize the * resource manager */ size = OF_getprop(sc->sc_node, "reg", reg, sizeof(reg)); if (size == sizeof(reg)) { sc->sc_addr = reg[0]; sc->sc_size = reg[1]; } else { return (ENXIO); } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "IOBus Device Memory"; if (rman_init(&sc->sc_mem_rman) != 0) { device_printf(dev, "failed to init mem range resources\n"); return (ENXIO); } rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); /* * Iterate through the sub-devices */ root = sc->sc_node; for (child = OF_child(root); child != 0; child = OF_peer(child)) { OF_getprop_alloc(child, "name", (void **)&name); cdev = device_add_child(dev, NULL, -1); if (cdev != NULL) { dinfo = malloc(sizeof(*dinfo), M_IOBUS, M_WAITOK); memset(dinfo, 0, sizeof(*dinfo)); resource_list_init(&dinfo->id_resources); dinfo->id_node = child; dinfo->id_name = name; iobus_add_intr(child, dinfo); iobus_add_reg(child, dinfo, sc->sc_addr); device_set_ivars(cdev, dinfo); } else { OF_prop_free(name); } } return (bus_generic_attach(dev)); } static int iobus_print_child(device_t dev, device_t child) { struct iobus_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->id_resources; retval += bus_print_child_header(dev, child); retval += printf(" offset 0x%x", dinfo->id_reg[1]); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void iobus_probe_nomatch(device_t dev, device_t child) { } static int iobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct iobus_devinfo *dinfo; if ((dinfo = device_get_ivars(child)) == NULL) return (ENOENT); switch (which) { case IOBUS_IVAR_NODE: *result = dinfo->id_node; break; case IOBUS_IVAR_NAME: *result = (uintptr_t)dinfo->id_name; break; case IOBUS_IVAR_NREGS: *result = dinfo->id_nregs; break; case IOBUS_IVAR_REGS: *result = (uintptr_t)dinfo->id_reg; break; default: return (ENOENT); } return (0); } static int iobus_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (EINVAL); } static struct rman * iobus_get_rman(device_t bus, int type, u_int flags) { struct iobus_softc 
*sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * iobus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); case SYS_RES_IRQ: return (bus_alloc_resource(bus, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } } static int iobus_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_adjust_resource(bus, child, r, start, end)); case SYS_RES_IRQ: return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int iobus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_release_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_release_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int -iobus_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +iobus_activate_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IRQ: - return (bus_generic_activate_resource(bus, child, type, rid, res)); + return (bus_generic_activate_resource(bus, child, res)); case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_activate_resource(bus, child, type, - rid, res)); + return (bus_generic_rman_activate_resource(bus, child, res)); default: return (EINVAL); } } static int -iobus_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +iobus_deactivate_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IRQ: - return (bus_generic_deactivate_resource(bus, child, type, rid, res)); + return (bus_generic_deactivate_resource(bus, child, res)); case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_deactivate_resource(bus, child, type, - rid, res)); + return (bus_generic_rman_deactivate_resource(bus, child, res)); default: return (EINVAL); } } static int iobus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct iobus_softc *sc; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. 
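 * (iobus resource addresses are bus-relative; iobus_add_reg() above
 * rebases the OFW "reg" properties.  The map routine below therefore
 * adds the bus base address, sc->sc_addr, back in before calling
 * pmap_mapdev_attr().)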
*/ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sc = device_get_softc(bus); map->r_vaddr = pmap_mapdev_attr((vm_paddr_t)start + sc->sc_addr, (vm_size_t)length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_le_tag; map->r_bushandle = (vm_offset_t)map->r_vaddr; map->r_size = length; return (0); } static int iobus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); return (0); default: return (EINVAL); } } diff --git a/sys/riscv/riscv/nexus.c b/sys/riscv/riscv/nexus.c index eb8126ba1e2f..b8b4eb6604ae 100644 --- a/sys/riscv/riscv/nexus.c +++ b/sys/riscv/riscv/nexus.c @@ -1,435 +1,431 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for RISC-V Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests and I/O memory address space. 
*/ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include "ofw_bus_if.h" #endif extern struct bus_space memmap_bus; static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static struct rman irq_rman; static device_probe_t nexus_fdt_probe; static device_attach_t nexus_attach; static bus_add_child_t nexus_add_child; static bus_print_child_t nexus_print_child; static bus_activate_resource_t nexus_activate_resource; static bus_alloc_resource_t nexus_alloc_resource; static bus_deactivate_resource_t nexus_deactivate_resource; static bus_get_resource_list_t nexus_get_reslist; static bus_get_rman_t nexus_get_rman; static bus_map_resource_t nexus_map_resource; static bus_unmap_resource_t nexus_unmap_resource; static bus_config_intr_t nexus_config_intr; static bus_describe_intr_t nexus_describe_intr; static bus_setup_intr_t nexus_setup_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_get_bus_tag_t nexus_get_bus_tag; static ofw_bus_map_intr_t nexus_ofw_map_intr; static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_fdt_probe), DEVMETHOD(device_attach, nexus_attach), /* OFW interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), /* Bus interface */ DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_get_rman, nexus_get_rman), DEVMETHOD(bus_map_resource, nexus_map_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_unmap_resource, nexus_unmap_resource), DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), DEVMETHOD_END }; static driver_t nexus_fdt_driver = { "nexus", nexus_methods, 1 /* no softc */ }; EARLY_DRIVER_MODULE(nexus_fdt, root, nexus_fdt_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_fdt_probe(device_t dev) { device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_attach mem_rman"); irq_rman.rm_start = 0; irq_rman.rm_end = ~0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0)) panic("nexus_attach irq_rman"); /* * Add direct children of nexus. Devices will be probed and attached * through ofwbus0. 
*/ nexus_add_child(dev, 0, "timer", 0); nexus_add_child(dev, 1, "rcons", 0); nexus_add_child(dev, 2, "ofwbus", 0); bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); device_set_ivars(child, ndev); return (child); } static struct rman * nexus_get_rman(device_t bus, int type, u_int flags) { switch (type) { case SYS_RES_IRQ: return (&irq_rman); case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&mem_rman); default: return (NULL); } } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource_list_entry *rle; /* * If this is an allocation of the "default" range for a given * RID, and we know what the resources for this device are * (ie. they aren't maintained by a child bus), then work out * the start/end values. */ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return (NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { return (EOPNOTSUPP); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error; if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* We depend here on rman_activate_resource() being idempotent. 
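 * The resource may already be active, e.g. if it was allocated with
 * RF_ACTIVE, so a second activation here must succeed as a no-op.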
*/ error = rman_activate_resource(res); if (error != 0) return (error); error = intr_setup_irq(child, res, filt, intr, arg, flags, cookiep); return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_teardown_irq(child, r, ih)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe_irq(child, irq, cookie, descr)); } static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { return (&memmap_bus); } static int -nexus_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +nexus_activate_resource(device_t bus, device_t child, struct resource *r) { int error; - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - error = bus_generic_rman_activate_resource(bus, child, type, - rid, r); + error = bus_generic_rman_activate_resource(bus, child, r); break; case SYS_RES_IRQ: error = rman_activate_resource(r); if (error != 0) return (error); error = intr_activate_irq(child, r); if (error != 0) { rman_deactivate_resource(r); return (error); } break; default: error = EINVAL; break; } return (error); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int -nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +nexus_deactivate_resource(device_t bus, device_t child, struct resource *r) { int error; - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - error = bus_generic_rman_deactivate_resource(bus, child, type, - rid, r); + error = bus_generic_rman_deactivate_resource(bus, child, r); break; case SYS_RES_IRQ: error = rman_deactivate_resource(r); if (error != 0) return (error); intr_deactivate_irq(child, r); break; default: error = EINVAL; break; } return (error); } static int nexus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if ((rman_get_flags(r) & RF_ACTIVE) == 0) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_vaddr = pmap_mapdev(start, length); map->r_bustag = &memmap_bus; map->r_size = length; /* * The handle is the virtual address. 
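 * (The same convention appears in the other map routines in this
 * change: the bus handle is simply the mapped virtual address.)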
*/
	map->r_bushandle = (bus_space_handle_t)map->r_vaddr;
	return (0);
}

static int
nexus_unmap_resource(device_t bus, device_t child, struct resource *r,
    struct resource_map *map)
{
	switch (rman_get_type(r)) {
	case SYS_RES_MEMORY:
	case SYS_RES_IOPORT:
		pmap_unmapdev(map->r_vaddr, map->r_size);
		return (0);
	default:
		return (EINVAL);
	}
}

static int
nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells,
    pcell_t *intr)
{
	struct intr_map_data_fdt *fdt_data;
	size_t len;
	u_int irq;

	len = sizeof(*fdt_data) + icells * sizeof(pcell_t);
	fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data(
	    INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO);
	fdt_data->iparent = iparent;
	fdt_data->ncells = icells;
	memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t));
	irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data);
	return (irq);
}
diff --git a/sys/sys/bus.h b/sys/sys/bus.h
index c74553f2ba54..76b1aa4ad2f5 100644
--- a/sys/sys/bus.h
+++ b/sys/sys/bus.h
@@ -1,1084 +1,1082 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997,1998,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_BUS_H_
#define _SYS_BUS_H_

#include
#include
#include
#include

/**
 * @defgroup NEWBUS newbus - a generic framework for managing devices
 * @{
 */

/**
 * @brief Interface information structure.
 */
struct u_businfo {
	int	ub_version;		/**< @brief interface version */
#define BUS_USER_VERSION	2
	int	ub_generation;		/**< @brief generation count */
};

/**
 * @brief State of the device.
 */
typedef enum device_state {
	DS_NOTPRESENT = 10,	/**< @brief not probed or probe failed */
	DS_ALIVE = 20,		/**< @brief probe succeeded */
	DS_ATTACHING = 25,	/**< @brief currently attaching */
	DS_ATTACHED = 30,	/**< @brief attach method called */
} device_state_t;

/**
 * @brief Device property types.
 *
 * These are used by bus logic to encode requested properties,
 * e.g. in DT all properties are stored as BE and need to be converted
 * to host endianness.
 */
typedef enum device_property_type {
	DEVICE_PROP_ANY = 0,
	DEVICE_PROP_BUFFER = 1,
	DEVICE_PROP_UINT32 = 2,
	DEVICE_PROP_UINT64 = 3,
	DEVICE_PROP_HANDLE = 4,
} device_property_type_t;

/**
 * @brief Device information exported to userspace.
 * The strings are placed one after the other, separated by NUL characters.
* Fields should be added after the last one and order maintained for compatibility */ #define BUS_USER_BUFFER (3*1024) struct u_device { uintptr_t dv_handle; uintptr_t dv_parent; uint32_t dv_devflags; /**< @brief API Flags for device */ uint16_t dv_flags; /**< @brief flags for dev state */ device_state_t dv_state; /**< @brief State of attachment */ char dv_fields[BUS_USER_BUFFER]; /**< @brief NUL terminated fields */ /* name (name of the device in tree) */ /* desc (driver description) */ /* drivername (Name of driver without unit number) */ /* pnpinfo (Plug and play information from bus) */ /* location (Location of device on parent */ /* NUL */ }; /* Flags exported via dv_flags. */ #define DF_ENABLED 0x01 /* device should be probed/attached */ #define DF_FIXEDCLASS 0x02 /* devclass specified at create time */ #define DF_WILDCARD 0x04 /* unit was originally wildcard */ #define DF_DESCMALLOCED 0x08 /* description was malloced */ #define DF_QUIET 0x10 /* don't print verbose attach message */ #define DF_DONENOMATCH 0x20 /* don't execute DEVICE_NOMATCH again */ #define DF_EXTERNALSOFTC 0x40 /* softc not allocated by us */ #define DF_SUSPENDED 0x100 /* Device is suspended. */ #define DF_QUIET_CHILDREN 0x200 /* Default to quiet for all my children */ #define DF_ATTACHED_ONCE 0x400 /* Has been attached at least once */ #define DF_NEEDNOMATCH 0x800 /* Has a pending NOMATCH event */ /** * @brief Device request structure used for ioctl's. * * Used for ioctl's on /dev/devctl2. All device ioctl's * must have parameter definitions which begin with dr_name. */ struct devreq_buffer { void *buffer; size_t length; }; struct devreq { char dr_name[128]; int dr_flags; /* request-specific flags */ union { struct devreq_buffer dru_buffer; void *dru_data; } dr_dru; #define dr_buffer dr_dru.dru_buffer /* variable-sized buffer */ #define dr_data dr_dru.dru_data /* fixed-size buffer */ }; #define DEV_ATTACH _IOW('D', 1, struct devreq) #define DEV_DETACH _IOW('D', 2, struct devreq) #define DEV_ENABLE _IOW('D', 3, struct devreq) #define DEV_DISABLE _IOW('D', 4, struct devreq) #define DEV_SUSPEND _IOW('D', 5, struct devreq) #define DEV_RESUME _IOW('D', 6, struct devreq) #define DEV_SET_DRIVER _IOW('D', 7, struct devreq) #define DEV_CLEAR_DRIVER _IOW('D', 8, struct devreq) #define DEV_RESCAN _IOW('D', 9, struct devreq) #define DEV_DELETE _IOW('D', 10, struct devreq) #define DEV_FREEZE _IOW('D', 11, struct devreq) #define DEV_THAW _IOW('D', 12, struct devreq) #define DEV_RESET _IOW('D', 13, struct devreq) #define DEV_GET_PATH _IOWR('D', 14, struct devreq) /* Flags for DEV_DETACH and DEV_DISABLE. */ #define DEVF_FORCE_DETACH 0x0000001 /* Flags for DEV_SET_DRIVER. */ #define DEVF_SET_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_CLEAR_DRIVER. */ #define DEVF_CLEAR_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_DELETE. */ #define DEVF_FORCE_DELETE 0x0000001 /* Flags for DEV_RESET */ #define DEVF_RESET_DETACH 0x0000001 /* Detach drivers vs suspend device */ #ifdef _KERNEL #include #include #include #include /** * Device name parsers. Hook to allow device enumerators to map * scheme-specific names to a device. */ typedef void (*dev_lookup_fn)(void *arg, const char *name, device_t *result); EVENTHANDLER_DECLARE(dev_lookup, dev_lookup_fn); /** * @brief A device driver. * * Provides an abstraction layer for driver dispatch. */ typedef struct kobj_class driver_t; /** * @brief A device class * * The devclass object has two main functions in the system. 
The first
 * is to manage the allocation of unit numbers for device instances
 * and the second is to hold the list of device drivers for a
 * particular bus type.  Each devclass has a name and there cannot be
 * two devclasses with the same name.  This ensures that unique unit
 * numbers are allocated to device instances.
 *
 * Drivers that support several different bus attachments (e.g. isa,
 * pci, pccard) should all use the same devclass to ensure that unit
 * numbers do not conflict.
 *
 * Each devclass may also have a parent devclass.  This is used when
 * searching for device drivers to allow a form of inheritance.  When
 * matching drivers with devices, first the driver list of the parent
 * device's devclass is searched.  If no driver is found in that list,
 * the search continues in the parent devclass (if any).
 */
typedef struct devclass *devclass_t;

/**
 * @brief A device method
 */
#define device_method_t kobj_method_t

/**
 * @brief Driver interrupt filter return values
 *
 * If a driver provides an interrupt filter routine it must return an
 * integer consisting of ORing together zero or more of the following
 * flags:
 *
 *	FILTER_STRAY	- this device did not trigger the interrupt
 *	FILTER_HANDLED	- the interrupt has been fully handled and can be EOId
 *	FILTER_SCHEDULE_THREAD - the threaded interrupt handler should be
 *			  scheduled to execute
 *
 * If the driver does not provide a filter, then the interrupt code will
 * act as if the filter had returned FILTER_SCHEDULE_THREAD.  Note that it
 * is illegal to specify any other flag with FILTER_STRAY and that it is
 * illegal to not specify either of FILTER_HANDLED or FILTER_SCHEDULE_THREAD
 * if FILTER_STRAY is not specified.
 */
#define FILTER_STRAY		0x01
#define FILTER_HANDLED		0x02
#define FILTER_SCHEDULE_THREAD	0x04

/**
 * @brief Driver interrupt service routines
 *
 * The filter routine is run in primary interrupt context and may not
 * block or use regular mutexes.  It may only use spin mutexes for
 * synchronization.  The filter may either completely handle the
 * interrupt or it may perform some of the work and defer more
 * expensive work to the regular interrupt handler.  If a filter
 * routine is not registered by the driver, then the regular interrupt
 * handler is always used to handle interrupts from this device.
 *
 * The regular interrupt handler executes in its own thread context
 * and may use regular mutexes.  However, it is prohibited from
 * sleeping on a sleep queue.
 */
typedef int driver_filter_t(void*);
typedef void driver_intr_t(void*);

/**
 * @brief Interrupt type bits.
 *
 * These flags may be passed by drivers to bus_setup_intr(9) when
 * registering a new interrupt handler.  The field is overloaded to
 * specify both the interrupt's type and any special properties.
 *
 * The INTR_TYPE* bits will be passed to intr_priority(9) to determine
 * the scheduling priority of the handler's ithread.  Historically, each
 * type was assigned a unique scheduling preference, but now only
 * INTR_TYPE_CLK receives a default priority higher than other
 * interrupts.  See sys/priority.h.
 *
 * Buses may choose to modify or augment these flags as appropriate,
 * e.g. nexus may apply INTR_EXCL.
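 *
 * For example, a network driver with an MPSAFE handler and no filter
 * might register its interrupt as follows (an illustrative sketch;
 * mydrv_intr and sc are hypothetical driver names):
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, NULL, mydrv_intr, sc,
 *	    &sc->intrhand);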
*/ enum intr_type { INTR_TYPE_TTY = 1, INTR_TYPE_BIO = 2, INTR_TYPE_NET = 4, INTR_TYPE_CAM = 8, INTR_TYPE_MISC = 16, INTR_TYPE_CLK = 32, INTR_TYPE_AV = 64, INTR_EXCL = 256, /* exclusive interrupt */ INTR_MPSAFE = 512, /* this interrupt is SMP safe */ INTR_ENTROPY = 1024, /* this interrupt provides entropy */ INTR_MD1 = 4096, /* flag reserved for MD use */ INTR_MD2 = 8192, /* flag reserved for MD use */ INTR_MD3 = 16384, /* flag reserved for MD use */ INTR_MD4 = 32768 /* flag reserved for MD use */ }; enum intr_trigger { INTR_TRIGGER_INVALID = -1, INTR_TRIGGER_CONFORM = 0, INTR_TRIGGER_EDGE = 1, INTR_TRIGGER_LEVEL = 2 }; enum intr_polarity { INTR_POLARITY_CONFORM = 0, INTR_POLARITY_HIGH = 1, INTR_POLARITY_LOW = 2 }; /** * CPU sets supported by bus_get_cpus(). Note that not all sets may be * supported for a given device. If a request is not supported by a * device (or its parents), then bus_get_cpus() will fail with EINVAL. */ enum cpu_sets { LOCAL_CPUS = 0, INTR_CPUS }; typedef int (*devop_t)(void); /** * @brief This structure is deprecated. * * Use the kobj(9) macro DEFINE_CLASS to * declare classes which implement device drivers. */ struct driver { KOBJ_CLASS_FIELDS; }; struct resource; /** * @brief A resource mapping. */ struct resource_map { bus_space_tag_t r_bustag; bus_space_handle_t r_bushandle; bus_size_t r_size; void *r_vaddr; }; /** * @brief Optional properties of a resource mapping request. */ struct resource_map_request { size_t size; rman_res_t offset; rman_res_t length; vm_memattr_t memattr; }; void resource_init_map_request_impl(struct resource_map_request *_args, size_t _sz); #define resource_init_map_request(rmr) \ resource_init_map_request_impl((rmr), sizeof(*(rmr))) int resource_validate_map_request(struct resource *r, struct resource_map_request *in, struct resource_map_request *out, rman_res_t *startp, rman_res_t *lengthp); /* * Definitions for drivers which need to keep simple lists of resources * for their child devices. */ /** * @brief An entry for a single resource in a resource list. */ struct resource_list_entry { STAILQ_ENTRY(resource_list_entry) link; int type; /**< @brief type argument to alloc_resource */ int rid; /**< @brief resource identifier */ int flags; /**< @brief resource flags */ struct resource *res; /**< @brief the real resource when allocated */ rman_res_t start; /**< @brief start of resource range */ rman_res_t end; /**< @brief end of resource range */ rman_res_t count; /**< @brief count within range */ }; STAILQ_HEAD(resource_list, resource_list_entry); #define RLE_RESERVED 0x0001 /* Reserved by the parent bus. */ #define RLE_ALLOCATED 0x0002 /* Reserved resource is allocated. */ #define RLE_PREFETCH 0x0004 /* Resource is a prefetch range. 
*/ void resource_list_init(struct resource_list *rl); void resource_list_free(struct resource_list *rl); struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count); int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count); int resource_list_busy(struct resource_list *rl, int type, int rid); int resource_list_reserved(struct resource_list *rl, int type, int rid); struct resource_list_entry* resource_list_find(struct resource_list *rl, int type, int rid); void resource_list_delete(struct resource_list *rl, int type, int rid); struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int resource_list_release(struct resource_list *rl, device_t bus, device_t child, int type, int rid, struct resource *res); int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type); struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid); void resource_list_purge(struct resource_list *rl); int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format); /* * The root bus, to which all top-level buses are attached. */ extern device_t root_bus; extern devclass_t root_devclass; void root_bus_configure(void); /* * Useful functions for implementing buses. */ struct _cpuset; -int bus_generic_activate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r); +int bus_generic_activate_resource(device_t dev, device_t child, + struct resource *r); device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit); int bus_generic_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end); struct resource * bus_generic_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_generic_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart); int bus_generic_attach(device_t dev); int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu); int bus_generic_child_location(device_t dev, device_t child, struct sbuf *sb); int bus_generic_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb); int bus_generic_child_present(device_t dev, device_t child); int bus_generic_config_intr(device_t, int, enum intr_trigger, enum intr_polarity); int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); -int bus_generic_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r); +int bus_generic_deactivate_resource(device_t dev, device_t child, + struct resource *r); int bus_generic_detach(device_t dev); void bus_generic_driver_added(device_t dev, driver_t *driver); int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, struct _cpuset *cpuset); bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child); bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child); int bus_generic_get_domain(device_t dev, device_t child, 
int *domain); ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); struct resource_list * bus_generic_get_resource_list(device_t, device_t); int bus_generic_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *args, struct resource_map *map); void bus_generic_new_pass(device_t dev); int bus_print_child_header(device_t dev, device_t child); int bus_print_child_domain(device_t dev, device_t child); int bus_print_child_footer(device_t dev, device_t child); int bus_generic_print_child(device_t dev, device_t child); int bus_generic_probe(device_t dev); int bus_generic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int bus_generic_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); int bus_generic_resume(device_t dev); int bus_generic_resume_child(device_t dev, device_t child); int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep); struct resource * bus_generic_rl_alloc_resource (device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); void bus_generic_rl_delete_resource (device_t, device_t, int, int); int bus_generic_rl_get_resource (device_t, device_t, int, int, rman_res_t *, rman_res_t *); int bus_generic_rl_set_resource (device_t, device_t, int, int, rman_res_t, rman_res_t); int bus_generic_rl_release_resource (device_t, device_t, int, int, struct resource *); struct resource * bus_generic_rman_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_generic_rman_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); int bus_generic_rman_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int bus_generic_rman_activate_resource(device_t dev, device_t child, - int type, int rid, struct resource *r); int bus_generic_rman_deactivate_resource(device_t dev, device_t child, - int type, int rid, struct resource *r); int bus_generic_shutdown(device_t dev); int bus_generic_suspend(device_t dev); int bus_generic_suspend_child(device_t dev, device_t child); int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq); int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq); int bus_generic_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map); int bus_generic_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int bus_generic_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb); int bus_helper_reset_post(device_t dev, int flags); int bus_helper_reset_prepare(device_t dev, int flags); int bus_null_rescan(device_t dev); /* * Wrapper functions for the BUS_*_RESOURCE methods to make client code * a little simpler. 
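 *
 * A typical use of the resource_spec helpers declared below (an
 * illustrative sketch; the mydrv_* names are hypothetical):
 *
 *	static struct resource_spec mydrv_res_spec[] = {
 *		{ SYS_RES_MEMORY, 0, RF_ACTIVE },
 *		{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
 *		RESOURCE_SPEC_END
 *	};
 *	struct resource *mydrv_res[2];
 *
 *	error = bus_alloc_resources(dev, mydrv_res_spec, mydrv_res);
 *	...
 *	bus_release_resources(dev, mydrv_res_spec, mydrv_res);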
*/ struct resource_spec { int type; int rid; int flags; }; #define RESOURCE_SPEC_END {-1, 0, 0} int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res); void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res); int bus_adjust_resource(device_t child, struct resource *r, rman_res_t start, rman_res_t end); int bus_translate_resource(device_t child, int type, rman_res_t start, rman_res_t *newstart); struct resource *bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -int bus_activate_resource(device_t dev, int type, int rid, - struct resource *r); -int bus_deactivate_resource(device_t dev, int type, int rid, - struct resource *r); +int bus_activate_resource(device_t dev, struct resource *r); +int bus_deactivate_resource(device_t dev, struct resource *r); int bus_map_resource(device_t dev, struct resource *r, struct resource_map_request *args, struct resource_map *map); int bus_unmap_resource(device_t dev, struct resource *r, struct resource_map *map); int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, struct _cpuset *cpuset); bus_dma_tag_t bus_get_dma_tag(device_t dev); bus_space_tag_t bus_get_bus_tag(device_t dev); int bus_get_domain(device_t dev, int *domain); int bus_release_resource(device_t dev, int type, int rid, struct resource *r); int bus_free_resource(device_t dev, int type, struct resource *r); int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep); int bus_teardown_intr(device_t dev, struct resource *r, void *cookie); int bus_suspend_intr(device_t dev, struct resource *r); int bus_resume_intr(device_t dev, struct resource *r); int bus_bind_intr(device_t dev, struct resource *r, int cpu); int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) __printflike(4, 5); int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count); int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp); rman_res_t bus_get_resource_start(device_t dev, int type, int rid); rman_res_t bus_get_resource_count(device_t dev, int type, int rid); void bus_delete_resource(device_t dev, int type, int rid); int bus_child_present(device_t child); int bus_child_pnpinfo(device_t child, struct sbuf *sb); int bus_child_location(device_t child, struct sbuf *sb); void bus_enumerate_hinted_children(device_t bus); int bus_delayed_attach_children(device_t bus); static __inline struct resource * bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags) { return (bus_alloc_resource(dev, type, rid, 0, ~0, 1, flags)); } static __inline struct resource * bus_alloc_resource_anywhere(device_t dev, int type, int *rid, rman_res_t count, u_int flags) { return (bus_alloc_resource(dev, type, rid, 0, ~0, count, flags)); } /* Compat shims for simpler bus resource API. 
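 * These macros dispatch on argument count so that both calling
 * conventions compile during the transition; e.g. (illustrative only):
 *
 *	bus_activate_resource(dev, res);		(new form)
 *	bus_activate_resource(dev, type, rid, res);	(old form)
 *
 * expand to calls of bus_activate_resource() and
 * bus_activate_resource_old(), respectively.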
*/ int bus_adjust_resource_old(device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end); -int bus_activate_resource_new(device_t dev, struct resource *r); -int bus_deactivate_resource_new(device_t dev, struct resource *r); +int bus_activate_resource_old(device_t dev, int type, int rid, + struct resource *r); +int bus_deactivate_resource_old(device_t dev, int type, int rid, + struct resource *r); int bus_map_resource_old(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map); int bus_unmap_resource_old(device_t dev, int type, struct resource *r, struct resource_map *map); int bus_release_resource_new(device_t dev, struct resource *r); #define _BUS_API_MACRO(_1, _2, _3, _4, _5, NAME, ...) NAME #define bus_adjust_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, bus_adjust_resource_old, \ bus_adjust_resource)(__VA_ARGS__) #define bus_activate_resource(...) \ - _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_activate_resource, \ - INVALID, bus_activate_resource_new)(__VA_ARGS__) + _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_activate_resource_old, \ + INVALID, bus_activate_resource)(__VA_ARGS__) #define bus_deactivate_resource(...) \ - _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_deactivate_resource, \ - INVALID, bus_deactivate_resource_new)(__VA_ARGS__) + _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_deactivate_resource_old, \ + INVALID, bus_deactivate_resource)(__VA_ARGS__) #define bus_map_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, bus_map_resource_old, \ bus_map_resource)(__VA_ARGS__) #define bus_unmap_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_unmap_resource_old, \ bus_unmap_resource)(__VA_ARGS__) #define bus_release_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_release_resource, \ INVALID, bus_release_resource_new)(__VA_ARGS__) /* * Access functions for device. */ device_t device_add_child(device_t dev, const char *name, int unit); device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit); void device_busy(device_t dev); int device_delete_child(device_t dev, device_t child); int device_delete_children(device_t dev); int device_attach(device_t dev); int device_detach(device_t dev); void device_disable(device_t dev); void device_enable(device_t dev); device_t device_find_child(device_t dev, const char *classname, int unit); const char *device_get_desc(device_t dev); devclass_t device_get_devclass(device_t dev); driver_t *device_get_driver(device_t dev); u_int32_t device_get_flags(device_t dev); device_t device_get_parent(device_t dev); int device_get_children(device_t dev, device_t **listp, int *countp); void *device_get_ivars(device_t dev); void device_set_ivars(device_t dev, void *ivars); const char *device_get_name(device_t dev); const char *device_get_nameunit(device_t dev); void *device_get_softc(device_t dev); device_state_t device_get_state(device_t dev); int device_get_unit(device_t dev); struct sysctl_ctx_list *device_get_sysctl_ctx(device_t dev); struct sysctl_oid *device_get_sysctl_tree(device_t dev); int device_has_quiet_children(device_t dev); int device_is_alive(device_t dev); /* did probe succeed? */ int device_is_attached(device_t dev); /* did attach succeed? */ int device_is_enabled(device_t dev); int device_is_suspended(device_t dev); int device_is_quiet(device_t dev); device_t device_lookup_by_name(const char *name); int device_print_prettyname(device_t dev); int device_printf(device_t dev, const char *, ...) 
__printflike(2, 3); int device_log(device_t dev, int pri, const char *, ...) __printflike(3, 4); int device_probe(device_t dev); int device_probe_and_attach(device_t dev); int device_probe_child(device_t bus, device_t dev); int device_quiesce(device_t dev); void device_quiet(device_t dev); void device_quiet_children(device_t dev); void device_set_desc(device_t dev, const char* desc); void device_set_descf(device_t dev, const char* fmt, ...) __printflike(2, 3); void device_set_desc_copy(device_t dev, const char* desc); int device_set_devclass(device_t dev, const char *classname); int device_set_devclass_fixed(device_t dev, const char *classname); bool device_is_devclass_fixed(device_t dev); int device_set_driver(device_t dev, driver_t *driver); void device_set_flags(device_t dev, u_int32_t flags); void device_set_softc(device_t dev, void *softc); void device_free_softc(void *softc); void device_claim_softc(device_t dev); int device_set_unit(device_t dev, int unit); /* XXX DONT USE XXX */ int device_shutdown(device_t dev); void device_unbusy(device_t dev); void device_verbose(device_t dev); ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type); bool device_has_property(device_t dev, const char *prop); /* * Access functions for devclass. */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp); devclass_t devclass_create(const char *classname); int devclass_delete_driver(devclass_t busclass, driver_t *driver); devclass_t devclass_find(const char *classname); const char *devclass_get_name(devclass_t dc); device_t devclass_get_device(devclass_t dc, int unit); void *devclass_get_softc(devclass_t dc, int unit); int devclass_get_devices(devclass_t dc, device_t **listp, int *countp); int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp); int devclass_get_count(devclass_t dc); int devclass_get_maxunit(devclass_t dc); int devclass_find_free_unit(devclass_t dc, int unit); void devclass_set_parent(devclass_t dc, devclass_t pdc); devclass_t devclass_get_parent(devclass_t dc); struct sysctl_ctx_list *devclass_get_sysctl_ctx(devclass_t dc); struct sysctl_oid *devclass_get_sysctl_tree(devclass_t dc); /* * Access functions for device resources. */ int resource_int_value(const char *name, int unit, const char *resname, int *result); int resource_long_value(const char *name, int unit, const char *resname, long *result); int resource_string_value(const char *name, int unit, const char *resname, const char **result); int resource_disabled(const char *name, int unit); int resource_find_match(int *anchor, const char **name, int *unit, const char *resname, const char *value); int resource_find_dev(int *anchor, const char *name, int *unit, const char *resname, const char *value); int resource_unset_value(const char *name, int unit, const char *resname); /* * Functions for maintaining and checking consistency of * bus information exported to userspace. */ int bus_data_generation_check(int generation); void bus_data_generation_update(void); /** * Some convenience defines for probe routines to return. These are just * suggested values, and there's nothing magical about them. * BUS_PROBE_SPECIFIC is for devices that cannot be reprobed, and that no * possible other driver may exist (typically legacy drivers who don't follow * all the rules, or special needs drivers). BUS_PROBE_VENDOR is the * suggested value that vendor supplied drivers use. 
This is for source or * binary drivers that are not yet integrated into the FreeBSD tree. Its use * in the base OS is prohibited. BUS_PROBE_DEFAULT is the normal return value * for drivers to use. It is intended that nearly all of the drivers in the * tree should return this value. BUS_PROBE_LOW_PRIORITY are for drivers that * have special requirements like when there are two drivers that support * overlapping series of hardware devices. In this case the one that supports * the older part of the line would return this value, while the one that * supports the newer ones would return BUS_PROBE_DEFAULT. BUS_PROBE_GENERIC * is for drivers that wish to have a generic form and a specialized form, * like is done with the pci bus and the acpi pci bus. BUS_PROBE_HOOVER is * for those buses that implement a generic device placeholder for devices on * the bus that have no more specific driver for them (aka ugen). * BUS_PROBE_NOWILDCARD or lower means that the device isn't really bidding * for a device node, but accepts only devices that its parent has told it * use this driver. */ #define BUS_PROBE_SPECIFIC 0 /* Only I can use this device */ #define BUS_PROBE_VENDOR (-10) /* Vendor supplied driver */ #define BUS_PROBE_DEFAULT (-20) /* Base OS default driver */ #define BUS_PROBE_LOW_PRIORITY (-40) /* Older, less desirable drivers */ #define BUS_PROBE_GENERIC (-100) /* generic driver for dev */ #define BUS_PROBE_HOOVER (-1000000) /* Driver for any dev on bus */ #define BUS_PROBE_NOWILDCARD (-2000000000) /* No wildcard device matches */ /** * During boot, the device tree is scanned multiple times. Each scan, * or pass, drivers may be attached to devices. Each driver * attachment is assigned a pass number. Drivers may only probe and * attach to devices if their pass number is less than or equal to the * current system-wide pass number. The default pass is the last pass * and is used by most drivers. Drivers needed by the scheduler are * probed in earlier passes. */ #define BUS_PASS_ROOT 0 /* Used to attach root0. */ #define BUS_PASS_BUS 10 /* Buses and bridges. */ #define BUS_PASS_CPU 20 /* CPU devices. */ #define BUS_PASS_RESOURCE 30 /* Resource discovery. */ #define BUS_PASS_INTERRUPT 40 /* Interrupt controllers. */ #define BUS_PASS_TIMER 50 /* Timers and clocks. */ #define BUS_PASS_SCHEDULER 60 /* Start scheduler. */ #define BUS_PASS_SUPPORTDEV 100000 /* Drivers which support DEFAULT drivers. */ #define BUS_PASS_DEFAULT __INT_MAX /* Everything else. */ #define BUS_PASS_ORDER_FIRST 0 #define BUS_PASS_ORDER_EARLY 2 #define BUS_PASS_ORDER_MIDDLE 5 #define BUS_PASS_ORDER_LATE 7 #define BUS_PASS_ORDER_LAST 9 #define BUS_LOCATOR_ACPI "ACPI" #define BUS_LOCATOR_FREEBSD "FreeBSD" #define BUS_LOCATOR_UEFI "UEFI" #define BUS_LOCATOR_OFW "OFW" extern int bus_current_pass; void bus_set_pass(int pass); /** * Routines to lock / unlock the newbus lock. * Must be taken out to interact with newbus. */ void bus_topo_lock(void); void bus_topo_unlock(void); struct mtx * bus_topo_mtx(void); void bus_topo_assert(void); /** * Shorthands for constructing method tables. */ #define DEVMETHOD KOBJMETHOD #define DEVMETHOD_END KOBJMETHOD_END /* * Some common device interfaces. */ #include "device_if.h" #include "bus_if.h" struct module; int driver_module_handler(struct module *, int, void *); /** * Module support for automatically adding drivers to buses. 
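 *
 * For example (illustrative only; "foo" is a hypothetical driver),
 * registering driver foo on the pci bus with no event handler:
 *
 *	DRIVER_MODULE(foo, pci, foo_driver, 0, 0);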
*/ struct driver_module_data { int (*dmd_chainevh)(struct module *, int, void *); void *dmd_chainarg; const char *dmd_busname; kobj_class_t dmd_driver; devclass_t *dmd_devclass; int dmd_pass; }; #define EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, \ order, pass) \ \ static struct driver_module_data name##_##busname##_driver_mod = { \ evh, arg, \ #busname, \ (kobj_class_t) &driver, \ NULL, \ pass \ }; \ \ static moduledata_t name##_##busname##_mod = { \ #busname "/" #name, \ driver_module_handler, \ &name##_##busname##_driver_mod \ }; \ DECLARE_MODULE(name##_##busname, name##_##busname##_mod, \ SI_SUB_DRIVERS, order) #define EARLY_DRIVER_MODULE(name, busname, driver, evh, arg, pass) \ EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, \ SI_ORDER_MIDDLE, pass) #define DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, order) \ EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, \ order, BUS_PASS_DEFAULT) #define DRIVER_MODULE(name, busname, driver, evh, arg) \ EARLY_DRIVER_MODULE(name, busname, driver, evh, arg, \ BUS_PASS_DEFAULT) /** * Generic ivar accessor generation macros for bus drivers */ #define __BUS_ACCESSOR(varp, var, ivarp, ivar, type) \ \ static __inline type varp ## _get_ ## var(device_t dev) \ { \ uintptr_t v; \ int e __diagused; \ e = BUS_READ_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, &v); \ KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d", \ __func__, device_get_nameunit(dev), \ device_get_nameunit(device_get_parent(dev)), e)); \ return ((type) v); \ } \ \ static __inline void varp ## _set_ ## var(device_t dev, type t) \ { \ uintptr_t v = (uintptr_t) t; \ int e __diagused; \ e = BUS_WRITE_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, v); \ KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d", \ __func__, device_get_nameunit(dev), \ device_get_nameunit(device_get_parent(dev)), e)); \ } struct device_location_cache; typedef struct device_location_cache device_location_cache_t; device_location_cache_t *dev_wired_cache_init(void); void dev_wired_cache_fini(device_location_cache_t *dcp); bool dev_wired_cache_match(device_location_cache_t *dcp, device_t dev, const char *at); /** * Shorthand macros, taking resource argument * Generated with sys/tools/bus_macro.sh */ #define bus_barrier(r, o, l, f) \ bus_space_barrier((r)->r_bustag, (r)->r_bushandle, (o), (l), (f)) #define bus_poke_1(r, o, v) \ bus_space_poke_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_1(r, o, vp) \ bus_space_peek_1((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_1(r, o) \ bus_space_read_1((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_1(r, o, d, c) \ bus_space_read_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_1(r, o, d, c) \ bus_space_read_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_1(r, o, v, c) \ bus_space_set_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_1(r, o, v, c) \ bus_space_set_region_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_1(r, o, v) \ bus_space_write_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_1(r, o, d, c) \ bus_space_write_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_1(r, o, d, c) \ bus_space_write_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_1(r, o) \ bus_space_read_stream_1((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_1(r, 
o, d, c) \ bus_space_read_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_1(r, o, d, c) \ bus_space_read_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_1(r, o, v, c) \ bus_space_set_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_1(r, o, v, c) \ bus_space_set_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_1(r, o, v) \ bus_space_write_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_1(r, o, d, c) \ bus_space_write_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_1(r, o, d, c) \ bus_space_write_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_2(r, o, v) \ bus_space_poke_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_2(r, o, vp) \ bus_space_peek_2((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_2(r, o) \ bus_space_read_2((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_2(r, o, d, c) \ bus_space_read_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_2(r, o, d, c) \ bus_space_read_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_2(r, o, v, c) \ bus_space_set_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_2(r, o, v, c) \ bus_space_set_region_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_2(r, o, v) \ bus_space_write_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_2(r, o, d, c) \ bus_space_write_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_2(r, o, d, c) \ bus_space_write_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_2(r, o) \ bus_space_read_stream_2((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_2(r, o, d, c) \ bus_space_read_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_2(r, o, d, c) \ bus_space_read_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_2(r, o, v, c) \ bus_space_set_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_2(r, o, v, c) \ bus_space_set_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_2(r, o, v) \ bus_space_write_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_2(r, o, d, c) \ bus_space_write_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_2(r, o, d, c) \ bus_space_write_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_4(r, o, v) \ bus_space_poke_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_4(r, o, vp) \ bus_space_peek_4((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_4(r, o) \ bus_space_read_4((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_4(r, o, d, c) \ bus_space_read_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_4(r, o, d, c) \ bus_space_read_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_4(r, o, v, c) \ bus_space_set_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_4(r, o, v, c) \ bus_space_set_region_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_4(r, o, v) \ 
bus_space_write_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_4(r, o, d, c) \ bus_space_write_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_4(r, o, d, c) \ bus_space_write_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_4(r, o) \ bus_space_read_stream_4((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_4(r, o, d, c) \ bus_space_read_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_4(r, o, d, c) \ bus_space_read_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_4(r, o, v, c) \ bus_space_set_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_4(r, o, v, c) \ bus_space_set_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_4(r, o, v) \ bus_space_write_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_4(r, o, d, c) \ bus_space_write_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_4(r, o, d, c) \ bus_space_write_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_8(r, o, v) \ bus_space_poke_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_8(r, o, vp) \ bus_space_peek_8((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_8(r, o) \ bus_space_read_8((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_8(r, o, d, c) \ bus_space_read_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_8(r, o, d, c) \ bus_space_read_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_8(r, o, v, c) \ bus_space_set_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_8(r, o, v, c) \ bus_space_set_region_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_8(r, o, v) \ bus_space_write_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_8(r, o, d, c) \ bus_space_write_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_8(r, o, d, c) \ bus_space_write_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_8(r, o) \ bus_space_read_stream_8((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_8(r, o, d, c) \ bus_space_read_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_8(r, o, d, c) \ bus_space_read_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_8(r, o, v, c) \ bus_space_set_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_8(r, o, v, c) \ bus_space_set_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_8(r, o, v) \ bus_space_write_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_8(r, o, d, c) \ bus_space_write_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_8(r, o, d, c) \ bus_space_write_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #endif /* _KERNEL */ #endif /* !_SYS_BUS_H_ */ diff --git a/sys/x86/include/legacyvar.h b/sys/x86/include/legacyvar.h index 789c6c55f198..4f05315c3949 100644 --- a/sys/x86/include/legacyvar.h +++ b/sys/x86/include/legacyvar.h @@ -1,75 +1,75 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Peter Wemm * All rights 
reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _X86_LEGACYVAR_H_ #define _X86_LEGACYVAR_H_ enum legacy_device_ivars { LEGACY_IVAR_PCIDOMAIN, LEGACY_IVAR_PCIBUS, LEGACY_IVAR_PCISLOT, LEGACY_IVAR_PCIFUNC }; #define LEGACY_ACCESSOR(var, ivar, type) \ __BUS_ACCESSOR(legacy, var, LEGACY, ivar, type) LEGACY_ACCESSOR(pcidomain, PCIDOMAIN, uint32_t) LEGACY_ACCESSOR(pcibus, PCIBUS, uint32_t) LEGACY_ACCESSOR(pcislot, PCISLOT, int) LEGACY_ACCESSOR(pcifunc, PCIFUNC, int) #undef LEGACY_ACCESSOR int legacy_pcib_maxslots(device_t dev); uint32_t legacy_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); int legacy_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); void legacy_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes); int legacy_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); struct resource *legacy_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int legacy_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); int legacy_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); -int legacy_pcib_activate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r); -int legacy_pcib_deactivate_resource(device_t dev, device_t child, int type, - int rid, struct resource *r); +int legacy_pcib_activate_resource(device_t dev, device_t child, + struct resource *r); +int legacy_pcib_deactivate_resource(device_t dev, device_t child, + struct resource *r); int legacy_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); int legacy_pcib_alloc_msix(device_t pcib, device_t dev, int *irq); int legacy_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); #endif /* !_X86_LEGACYVAR_H_ */ diff --git a/sys/x86/pci/pci_bus.c b/sys/x86/pci/pci_bus.c index c7715c47d3c8..67be4d61fa4a 100644 --- a/sys/x86/pci/pci_bus.c +++ b/sys/x86/pci/pci_bus.c @@ -1,776 +1,775 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, Stefan Esser * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CPU_ELAN #include #endif #include #include #include #include "pcib_if.h" int legacy_pcib_maxslots(device_t dev) { return 31; } /* read configuration space register */ uint32_t legacy_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { return(pci_cfgregread(0, bus, slot, func, reg, bytes)); } /* write configuration space register */ void legacy_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { pci_cfgregwrite(0, bus, slot, func, reg, data, bytes); } /* route interrupt */ static int legacy_pcib_route_interrupt(device_t pcib, device_t dev, int pin) { #ifdef __HAVE_PIR return (pci_pir_route_interrupt(pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pin)); #else /* No routing possible */ return (PCI_INVALID_IRQ); #endif } /* Pass MSI requests up to the nexus. */ int legacy_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } int legacy_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } int legacy_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus, hostb; int error, func, slot; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); slot = legacy_get_pcislot(pcib); func = legacy_get_pcifunc(pcib); if (slot == -1 || func == -1) return (0); hostb = pci_find_bsf(0, slot, func); KASSERT(hostb != NULL, ("%s: missing hostb for 0:%d:%d", __func__, slot, func)); pci_ht_map_msi(hostb, *addr); return (0); } static const char * legacy_pcib_is_host_bridge(int bus, int slot, int func, uint32_t id, uint8_t class, uint8_t subclass, uint8_t *busnum) { #ifdef __i386__ const char *s = NULL; static uint8_t pxb[4]; /* hack for 450nx */ *busnum = 0; switch (id) { case 0x12258086: s = "Intel 824?? 
host to PCI bridge"; /* XXX This is a guess */ /* *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x41, 1); */ *busnum = bus; break; case 0x71208086: s = "Intel 82810 (i810 GMCH) Host To Hub bridge"; break; case 0x71228086: s = "Intel 82810-DC100 (i810-DC100 GMCH) Host To Hub bridge"; break; case 0x71248086: s = "Intel 82810E (i810E GMCH) Host To Hub bridge"; break; case 0x11308086: s = "Intel 82815 (i815 GMCH) Host To Hub bridge"; break; case 0x71808086: s = "Intel 82443LX (440 LX) host to PCI bridge"; break; case 0x71908086: s = "Intel 82443BX (440 BX) host to PCI bridge"; break; case 0x71928086: s = "Intel 82443BX host to PCI bridge (AGP disabled)"; break; case 0x71948086: s = "Intel 82443MX host to PCI bridge"; break; case 0x71a08086: s = "Intel 82443GX host to PCI bridge"; break; case 0x71a18086: s = "Intel 82443GX host to AGP bridge"; break; case 0x71a28086: s = "Intel 82443GX host to PCI bridge (AGP disabled)"; break; case 0x84c48086: s = "Intel 82454KX/GX (Orion) host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x4a, 1); break; case 0x84ca8086: /* * For the 450nx chipset, there is a whole bundle of * things pretending to be host bridges. The MIOC will * be seen first and isn't really a pci bridge (the * actual buses are attached to the PXB's). We need to * read the registers of the MIOC to figure out the * bus numbers for the PXB channels. * * Since the MIOC doesn't have a pci bus attached, we * pretend it wasn't there. */ pxb[0] = legacy_pcib_read_config(0, bus, slot, func, 0xd0, 1); /* BUSNO[0] */ pxb[1] = legacy_pcib_read_config(0, bus, slot, func, 0xd1, 1) + 1; /* SUBA[0]+1 */ pxb[2] = legacy_pcib_read_config(0, bus, slot, func, 0xd3, 1); /* BUSNO[1] */ pxb[3] = legacy_pcib_read_config(0, bus, slot, func, 0xd4, 1) + 1; /* SUBA[1]+1 */ return NULL; case 0x84cb8086: switch (slot) { case 0x12: s = "Intel 82454NX PXB#0, Bus#A"; *busnum = pxb[0]; break; case 0x13: s = "Intel 82454NX PXB#0, Bus#B"; *busnum = pxb[1]; break; case 0x14: s = "Intel 82454NX PXB#1, Bus#A"; *busnum = pxb[2]; break; case 0x15: s = "Intel 82454NX PXB#1, Bus#B"; *busnum = pxb[3]; break; } break; case 0x1A308086: s = "Intel 82845 Host to PCI bridge"; break; /* AMD -- vendor 0x1022 */ case 0x30001022: s = "AMD Elan SC520 host to PCI bridge"; #ifdef CPU_ELAN init_AMD_Elan_sc520(); #else printf( "*** WARNING: missing CPU_ELAN -- timekeeping may be wrong\n"); #endif break; case 0x70061022: s = "AMD-751 host to PCI bridge"; break; case 0x700e1022: s = "AMD-761 host to PCI bridge"; break; /* SiS -- vendor 0x1039 */ case 0x04961039: s = "SiS 85c496"; break; case 0x04061039: s = "SiS 85c501"; break; case 0x06011039: s = "SiS 85c601"; break; case 0x55911039: s = "SiS 5591 host to PCI bridge"; break; case 0x00011039: s = "SiS 5591 host to AGP bridge"; break; /* VLSI -- vendor 0x1004 */ case 0x00051004: s = "VLSI 82C592 Host to PCI bridge"; break; /* XXX Here is MVP3, I got the datasheet but NO M/B to test it */ /* totally. Please let me know if anything wrong. -F */ /* XXX need info on the MVP3 -- any takers? */ case 0x05981106: s = "VIA 82C598MVP (Apollo MVP3) host bridge"; break; /* AcerLabs -- vendor 0x10b9 */ /* Funny : The datasheet told me vendor id is "10b8",sub-vendor */ /* id is '10b9" but the register always shows "10b9". 
-Foxfair */ case 0x154110b9: s = "AcerLabs M1541 (Aladdin-V) PCI host bridge"; break; /* OPTi -- vendor 0x1045 */ case 0xc7011045: s = "OPTi 82C700 host to PCI bridge"; break; case 0xc8221045: s = "OPTi 82C822 host to PCI Bridge"; break; /* ServerWorks -- vendor 0x1166 */ case 0x00051166: s = "ServerWorks NB6536 2.0HE host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00061166: /* FALLTHROUGH */ case 0x00081166: /* FALLTHROUGH */ case 0x02011166: /* FALLTHROUGH */ case 0x010f1014: /* IBM re-badged ServerWorks chipset */ s = "ServerWorks host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00091166: s = "ServerWorks NB6635 3.0LE host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00101166: s = "ServerWorks CIOB30 host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00111166: /* FALLTHROUGH */ case 0x03021014: /* IBM re-badged ServerWorks chipset */ s = "ServerWorks CMIC-HE host to PCI-X bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; /* XXX unknown chipset, but working */ case 0x00171166: /* FALLTHROUGH */ case 0x01011166: case 0x01101166: case 0x02251166: s = "ServerWorks host to PCI bridge(unknown chipset)"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; /* Compaq/HP -- vendor 0x0e11 */ case 0x60100e11: s = "Compaq/HP Model 6010 HotPlug PCI Bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0xc8, 1); break; /* Integrated Micro Solutions -- vendor 0x10e0 */ case 0x884910e0: s = "Integrated Micro Solutions VL Bridge"; break; default: if (class == PCIC_BRIDGE && subclass == PCIS_BRIDGE_HOST) s = "Host to PCI bridge"; break; } return s; #else const char *s = NULL; *busnum = 0; if (class == PCIC_BRIDGE && subclass == PCIS_BRIDGE_HOST) s = "Host to PCI bridge"; return s; #endif } /* * Scan the first pci bus for host-pci bridges and add pcib instances * to the nexus for each bridge. */ static void legacy_pcib_identify(driver_t *driver, device_t parent) { int bus, slot, func; uint8_t hdrtype; int found = 0; int pcifunchigh; int found824xx = 0; int found_orion = 0; device_t child; devclass_t pci_devclass; if (pci_cfgregopen() == 0) return; /* * Check to see if we haven't already had a PCI bus added * via some other means. If we have, bail since otherwise * we're going to end up duplicating it. */ if ((pci_devclass = devclass_find("pci")) && devclass_get_device(pci_devclass, 0)) return; bus = 0; retry: for (slot = 0; slot <= PCI_SLOTMAX; slot++) { func = 0; hdrtype = legacy_pcib_read_config(0, bus, slot, func, PCIR_HDRTYPE, 1); /* * When enumerating bus devices, the standard says that * one should check the header type and ignore the slots whose * header types that the software doesn't know about. We use * this to filter out devices. */ if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if ((hdrtype & PCIM_MFDEV) && (!found_orion || hdrtype != 0xff)) pcifunchigh = PCI_FUNCMAX; else pcifunchigh = 0; for (func = 0; func <= pcifunchigh; func++) { /* * Read the IDs and class from the device. 
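 * (PCIR_DEVVENDOR is a single 32-bit configuration read at offset 0: the
 * vendor ID occupies the low 16 bits and the device ID the high 16 bits,
 * which is why the constants matched above, e.g. 0x12258086, end in a
 * vendor code such as Intel's 0x8086.)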
*/ uint32_t id; uint8_t class, subclass, busnum; const char *s; device_t *devs; int ndevs, i; id = legacy_pcib_read_config(0, bus, slot, func, PCIR_DEVVENDOR, 4); if (id == -1) continue; class = legacy_pcib_read_config(0, bus, slot, func, PCIR_CLASS, 1); subclass = legacy_pcib_read_config(0, bus, slot, func, PCIR_SUBCLASS, 1); s = legacy_pcib_is_host_bridge(bus, slot, func, id, class, subclass, &busnum); if (s == NULL) continue; /* * Check to see if the physical bus has already * been seen. Eg: hybrid 32 and 64 bit host * bridges to the same logical bus. */ if (device_get_children(parent, &devs, &ndevs) == 0) { for (i = 0; s != NULL && i < ndevs; i++) { if (strcmp(device_get_name(devs[i]), "pcib") != 0) continue; if (legacy_get_pcibus(devs[i]) == busnum) s = NULL; } free(devs, M_TEMP); } if (s == NULL) continue; /* * Add at priority 100 to make sure we * go after any motherboard resources */ child = BUS_ADD_CHILD(parent, 100, "pcib", busnum); device_set_desc(child, s); legacy_set_pcibus(child, busnum); legacy_set_pcislot(child, slot); legacy_set_pcifunc(child, func); found = 1; if (id == 0x12258086) found824xx = 1; if (id == 0x84c48086) found_orion = 1; } } if (found824xx && bus == 0) { bus++; goto retry; } /* * Make sure we add at least one bridge since some old * hardware doesn't actually have a host-pci bridge device. * Note that pci_cfgregopen() thinks we have PCI devices.. */ if (!found) { #ifndef NO_LEGACY_PCIB if (bootverbose) printf( "legacy_pcib_identify: no bridge found, adding pcib0 anyway\n"); child = BUS_ADD_CHILD(parent, 100, "pcib", 0); legacy_set_pcibus(child, 0); #endif } } static int legacy_pcib_probe(device_t dev) { if (pci_cfgregopen() == 0) return ENXIO; return -100; } static int legacy_pcib_attach(device_t dev) { #ifdef __HAVE_PIR device_t pir; int bus; bus = pcib_get_bus(dev); /* * Look for a PCI BIOS interrupt routing table as that will be * our method of routing interrupts if we have one. */ if (pci_pir_probe(bus, 0)) { pir = BUS_ADD_CHILD(device_get_parent(dev), 0, "pir", 0); if (pir != NULL) device_probe_and_attach(pir); } #endif device_add_child(dev, "pci", -1); return bus_generic_attach(dev); } int legacy_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return 0; case PCIB_IVAR_BUS: *result = legacy_get_pcibus(dev); return 0; } return ENOENT; } int legacy_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return EINVAL; case PCIB_IVAR_BUS: legacy_set_pcibus(dev, value); return 0; } return ENOENT; } /* * Helper routine for x86 Host-PCI bridge driver resource allocation. * This is used to adjust the start address of wildcard allocation * requests to avoid low addresses that are known to be problematic. * * If no memory preference is given, use upper 32MB slot most BIOSes * use for their memory window. This is typically only used on older * laptops that don't have PCI buses behind a PCI bridge, so assuming * > 32MB is likely OK. * * However, this can cause problems for other chipsets, so we make * this tunable by hw.pci.host_mem_start. 
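 *
 * For example, a wildcard memory request ([0, ~0]) has its start bumped up
 * to 0x80000000 (the default hw.pci.host_mem_start below), and a wildcard
 * I/O port request starting below 0x1000 is bumped to 0x1000. Requests for
 * a fixed range (start + count - 1 == end) pass through unchanged.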
*/ SYSCTL_DECL(_hw_pci); static unsigned long host_mem_start = 0x80000000; SYSCTL_ULONG(_hw_pci, OID_AUTO, host_mem_start, CTLFLAG_RDTUN, &host_mem_start, 0, "Limit the host bridge memory to being above this address."); rman_res_t hostb_alloc_start(int type, rman_res_t start, rman_res_t end, rman_res_t count) { if (start + count - 1 != end) { if (type == SYS_RES_MEMORY && start < host_mem_start) start = host_mem_start; if (type == SYS_RES_IOPORT && start < 0x1000) start = 0x1000; } return (start); } struct resource * legacy_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(0, child, rid, start, end, count, flags)); #endif start = hostb_alloc_start(type, start, end, count); return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) int legacy_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_adjust_bus(0, child, r, start, end)); return (bus_generic_adjust_resource(dev, child, r, start, end)); } int legacy_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (type == PCI_RES_BUS) return (pci_domain_release_bus(0, child, rid, r)); return (bus_generic_release_resource(dev, child, type, rid, r)); } int -legacy_pcib_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +legacy_pcib_activate_resource(device_t dev, device_t child, struct resource *r) { - if (type == PCI_RES_BUS) - return (pci_domain_activate_bus(0, child, rid, r)); - return (bus_generic_activate_resource(dev, child, type, rid, r)); + if (rman_get_type(r) == PCI_RES_BUS) + return (pci_domain_activate_bus(0, child, r)); + return (bus_generic_activate_resource(dev, child, r)); } int -legacy_pcib_deactivate_resource(device_t dev, device_t child, int type, int rid, +legacy_pcib_deactivate_resource(device_t dev, device_t child, struct resource *r) { - if (type == PCI_RES_BUS) - return (pci_domain_deactivate_bus(0, child, rid, r)); - return (bus_generic_deactivate_resource(dev, child, type, rid, r)); + if (rman_get_type(r) == PCI_RES_BUS) + return (pci_domain_deactivate_bus(0, child, r)); + return (bus_generic_deactivate_resource(dev, child, r)); } #endif static device_method_t legacy_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_identify, legacy_pcib_identify), DEVMETHOD(device_probe, legacy_pcib_probe), DEVMETHOD(device_attach, legacy_pcib_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, legacy_pcib_read_ivar), DEVMETHOD(bus_write_ivar, legacy_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, legacy_pcib_alloc_resource), #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_adjust_resource, legacy_pcib_adjust_resource), DEVMETHOD(bus_release_resource, legacy_pcib_release_resource), DEVMETHOD(bus_activate_resource, legacy_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, legacy_pcib_deactivate_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 
DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), #endif DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, legacy_pcib_maxslots), DEVMETHOD(pcib_read_config, legacy_pcib_read_config), DEVMETHOD(pcib_write_config, legacy_pcib_write_config), DEVMETHOD(pcib_route_interrupt, legacy_pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, legacy_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, legacy_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, legacy_pcib_map_msi), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, legacy_pcib_driver, legacy_pcib_methods, 1); DRIVER_MODULE(pcib, legacy, legacy_pcib_driver, 0, 0); /* * Install placeholder to claim the resources owned by the * PCI bus interface. This could be used to extract the * config space registers in the extreme case where the PnP * ID is available and the PCI BIOS isn't, but for now we just * eat the PnP ID and do nothing else. * * we silence this probe, as it will generally confuse people. */ static struct isa_pnp_id pcibus_pnp_ids[] = { { 0x030ad041 /* PNP0A03 */, "PCI Bus" }, { 0x080ad041 /* PNP0A08 */, "PCIe Bus" }, { 0 } }; static int pcibus_pnp_probe(device_t dev) { int result; if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, pcibus_pnp_ids)) <= 0) device_quiet(dev); return(result); } static int pcibus_pnp_attach(device_t dev) { return(0); } static device_method_t pcibus_pnp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcibus_pnp_probe), DEVMETHOD(device_attach, pcibus_pnp_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), { 0, 0 } }; DEFINE_CLASS_0(pcibus_pnp, pcibus_pnp_driver, pcibus_pnp_methods, 1); DRIVER_MODULE(pcibus_pnp, isa, pcibus_pnp_driver, 0, 0); #ifdef __HAVE_PIR /* * Provide a PCI-PCI bridge driver for PCI buses behind PCI-PCI bridges * that appear in the PCIBIOS Interrupt Routing Table to use the routing * table for interrupt routing when possible. */ static int pcibios_pcib_probe(device_t bus); static device_method_t pcibios_pcib_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcibios_pcib_probe), /* pcib interface */ DEVMETHOD(pcib_route_interrupt, legacy_pcib_route_interrupt), {0, 0} }; DEFINE_CLASS_1(pcib, pcibios_pcib_driver, pcibios_pcib_pci_methods, sizeof(struct pcib_softc), pcib_driver); DRIVER_MODULE(pcibios_pcib, pci, pcibios_pcib_driver, 0, 0); ISA_PNP_INFO(pcibus_pnp_ids); static int pcibios_pcib_probe(device_t dev) { int bus; if ((pci_get_class(dev) != PCIC_BRIDGE) || (pci_get_subclass(dev) != PCIS_BRIDGE_PCI)) return (ENXIO); bus = pci_read_config(dev, PCIR_SECBUS_1, 1); if (bus == 0) return (ENXIO); if (!pci_pir_probe(bus, 1)) return (ENXIO); device_set_desc(dev, "PCIBIOS PCI-PCI bridge"); return (-2000); } #endif
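/*
 * Illustrative sketch (an editor's addition, not part of the change above):
 * a typical consumer of the updated single-argument activate API. The
 * driver name, softc, and BAR rid are hypothetical; the calls themselves
 * use only interfaces declared in sys/bus.h as modified by this diff.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>

static int
foo_attach(device_t dev)
{
	struct resource *res;
	int rid = 0x10;		/* hypothetical: first PCI memory BAR */
	uint32_t val;

	/* Allocate without RF_ACTIVE so that activation is an explicit step. */
	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
	if (res == NULL)
		return (ENXIO);

	/*
	 * Post-change signature: no type/rid arguments; bus drivers recover
	 * them from the resource via rman_get_type() and rman_get_rid().
	 */
	if (bus_activate_resource(dev, res) != 0)
		return (ENXIO);	/* a real driver would release res here */

	val = bus_read_4(res, 0);	/* shorthand wrapper from sys/bus.h */
	device_printf(dev, "register 0 = 0x%08x\n", val);
	return (0);
}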