diff --git a/share/man/man9/bus_adjust_resource.9 b/share/man/man9/bus_adjust_resource.9
index 6afc31cfb35c..27173894e0ec 100644
--- a/share/man/man9/bus_adjust_resource.9
+++ b/share/man/man9/bus_adjust_resource.9
@@ -1,102 +1,95 @@
 .\" -*- nroff -*-
 .\"
 .\" Copyright (c) 2011 Hudson River Trading LLC
 .\" Written by: John H. Baldwin
 .\" All rights reserved.
 .\"
 .\" Redistribution and use in source and binary forms, with or without
 .\" modification, are permitted provided that the following conditions
 .\" are met:
 .\" 1. Redistributions of source code must retain the above copyright
 .\"    notice, this list of conditions and the following disclaimer.
 .\" 2. Redistributions in binary form must reproduce the above copyright
 .\"    notice, this list of conditions and the following disclaimer in the
 .\"    documentation and/or other materials provided with the distribution.
 .\"
 .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 .\" ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 .\" SUCH DAMAGE.
 .\"
-.Dd April 23, 2016
+.Dd March 13, 2024
 .Dt BUS_ADJUST_RESOURCE 9
 .Os
 .Sh NAME
 .Nm bus_adjust_resource
 .Nd adjust resource allocated from a parent bus
 .Sh SYNOPSIS
 .In sys/param.h
 .In sys/bus.h
 .Pp
 .In machine/bus.h
 .In sys/rman.h
 .In machine/resource.h
 .Ft int
 .Fo bus_adjust_resource
-.Fa "device_t dev" "int type" "struct resource *r"
+.Fa "device_t dev" "struct resource *r"
 .Fa "rman_res_t start" "rman_res_t end"
 .Fc
 .Sh DESCRIPTION
 This function is used to ask the parent bus to adjust the resource range
 assigned to an allocated resource.
 The resource
 .Fa r
 should have been allocated by a previous call to
 .Xr bus_alloc_resource 9 .
 The new resource range must overlap the existing range of
 .Fa r .
-The
-.Fa type
-argument should match the
-.Fa type
-argument passed to
-.Xr bus_alloc_resource 9
-when the resource was initially allocated.
 .Pp
 Note that none of the constraints of the original allocation request
 such as alignment or boundary restrictions are checked by
 .Fn bus_adjust_resource .
 It is the caller's responsibility to enforce any such requirements.
 .Sh RETURN VALUES
 The
 .Fn bus_adjust_resource
 method returns zero on success or an error code on failure.
 .Sh EXAMPLES
 Grow an existing memory resource by 4096 bytes.
 .Bd -literal
 	struct resource *res;
 	int error;
 
-	error = bus_adjust_resource(dev, SYS_RES_MEMORY, res,
-	    rman_get_start(res), rman_get_end(res) + 0x1000);
+	error = bus_adjust_resource(dev, res, rman_get_start(res),
+	    rman_get_end(res) + 0x1000);
 .Ed
 .Sh ERRORS
 .Fn bus_adjust_resource
 will fail if:
 .Bl -tag -width Er
 .It Bq Er EINVAL
 The
 .Fa dev
 device does not have a parent device.
 .It Bq Er EINVAL
 The
 .Fa r
 resource is a shared resource.
 .It Bq Er EINVAL
 The new address range does not overlap with the existing address range of
 .Fa r .
 .It Bq Er EBUSY
 The new address range conflicts with another allocated resource.
.El .Sh SEE ALSO .Xr bus_alloc_resource 9 , .Xr bus_release_resource 9 , .Xr device 9 , .Xr driver 9 diff --git a/sys/arm/mv/mv_pci.c b/sys/arm/mv/mv_pci.c index 76da1c4da7a7..42ee3180a942 100644 --- a/sys/arm/mv/mv_pci.c +++ b/sys/arm/mv/mv_pci.c @@ -1,1413 +1,1412 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2010 The FreeBSD Foundation * Copyright (c) 2010-2015 Semihalf * All rights reserved. * * Developed by Semihalf. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Marvell integrated PCI/PCI-Express controller driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "pcib_if.h" #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif /* * Code and data related to fdt-based PCI configuration. * * This stuff used to be in dev/fdt/fdt_pci.c and fdt_common.h, but it was * always Marvell-specific so that was deleted and the code now lives here. */ struct mv_pci_range { u_long base_pci; u_long base_parent; u_long len; }; #define FDT_RANGES_CELLS ((3 + 3 + 2) * 2) #define PCI_SPACE_LEN 0x00400000 static void mv_pci_range_dump(struct mv_pci_range *range) { #ifdef DEBUG printf("\n"); printf(" base_pci = 0x%08lx\n", range->base_pci); printf(" base_par = 0x%08lx\n", range->base_parent); printf(" len = 0x%08lx\n", range->len); #endif } static int mv_pci_ranges_decode(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { pcell_t ranges[FDT_RANGES_CELLS]; struct mv_pci_range *pci_space; pcell_t addr_cells, size_cells, par_addr_cells; pcell_t *rangesptr; pcell_t cell0, cell2; int tuple_size, tuples, i, rv, offset_cells, len; int portid, is_io_space; /* * Retrieve 'ranges' property. 
*/ if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (EINVAL); if (addr_cells != 3 || size_cells != 2) return (ERANGE); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 3) return (ERANGE); len = OF_getproplen(node, "ranges"); if (len > sizeof(ranges)) return (ENOMEM); if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) return (EINVAL); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); tuples = len / tuple_size; /* * Initialize the ranges so that we don't have to worry about * having them all defined in the FDT. In particular, it is * perfectly fine not to want I/O space on PCI buses. */ bzero(io_space, sizeof(*io_space)); bzero(mem_space, sizeof(*mem_space)); rangesptr = &ranges[0]; offset_cells = 0; for (i = 0; i < tuples; i++) { cell0 = fdt_data_get((void *)rangesptr, 1); rangesptr++; /* cell1 */ rangesptr++; cell2 = fdt_data_get((void *)rangesptr, 1); rangesptr++; portid = fdt_data_get((void *)(rangesptr+1), 1); if (cell0 & 0x02000000) { pci_space = mem_space; is_io_space = 0; } else if (cell0 & 0x01000000) { pci_space = io_space; is_io_space = 1; } else { rv = ERANGE; goto out; } if (par_addr_cells == 3) { /* * This is a PCI subnode 'ranges'. Skip cell0 and * cell1 of this entry and only use cell2. */ offset_cells = 2; rangesptr += offset_cells; } if ((par_addr_cells - offset_cells) > 2) { rv = ERANGE; goto out; } pci_space->base_parent = fdt_data_get((void *)rangesptr, par_addr_cells - offset_cells); rangesptr += par_addr_cells - offset_cells; if (size_cells > 2) { rv = ERANGE; goto out; } pci_space->len = fdt_data_get((void *)rangesptr, size_cells); rangesptr += size_cells; pci_space->base_pci = cell2; if (pci_space->len == 0) { pci_space->len = PCI_SPACE_LEN; pci_space->base_parent = fdt_immr_va + PCI_SPACE_LEN * ( 2 * portid + is_io_space); } } rv = 0; out: return (rv); } static int mv_pci_ranges(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { int err; debugf("Processing PCI node: %x\n", node); if ((err = mv_pci_ranges_decode(node, io_space, mem_space)) != 0) { debugf("could not decode parent PCI node 'ranges'\n"); return (err); } debugf("Post fixup dump:\n"); mv_pci_range_dump(io_space); mv_pci_range_dump(mem_space); return (0); } int mv_pci_devmap(phandle_t node, struct devmap_entry *devmap, vm_offset_t io_va, vm_offset_t mem_va) { struct mv_pci_range io_space, mem_space; int error; if ((error = mv_pci_ranges_decode(node, &io_space, &mem_space)) != 0) return (error); devmap->pd_va = (io_va ? io_va : io_space.base_parent); devmap->pd_pa = io_space.base_parent; devmap->pd_size = io_space.len; devmap++; devmap->pd_va = (mem_va ? mem_va : mem_space.base_parent); devmap->pd_pa = mem_space.base_parent; devmap->pd_size = mem_space.len; return (0); } /* * Code and data related to the Marvell pcib driver. 
*/ #define PCI_CFG_ENA (1U << 31) #define PCI_CFG_BUS(bus) (((bus) & 0xff) << 16) #define PCI_CFG_DEV(dev) (((dev) & 0x1f) << 11) #define PCI_CFG_FUN(fun) (((fun) & 0x7) << 8) #define PCI_CFG_PCIE_REG(reg) ((reg) & 0xfc) #define PCI_REG_CFG_ADDR 0x0C78 #define PCI_REG_CFG_DATA 0x0C7C #define PCIE_REG_CFG_ADDR 0x18F8 #define PCIE_REG_CFG_DATA 0x18FC #define PCIE_REG_CONTROL 0x1A00 #define PCIE_CTRL_LINK1X 0x00000001 #define PCIE_REG_STATUS 0x1A04 #define PCIE_REG_IRQ_MASK 0x1910 #define PCIE_CONTROL_ROOT_CMPLX (1 << 1) #define PCIE_CONTROL_HOT_RESET (1 << 24) #define PCIE_LINK_TIMEOUT 1000000 #define PCIE_STATUS_LINK_DOWN 1 #define PCIE_STATUS_DEV_OFFS 16 /* Minimum PCI Memory and I/O allocations taken from PCI spec (in bytes) */ #define PCI_MIN_IO_ALLOC 4 #define PCI_MIN_MEM_ALLOC 16 #define BITS_PER_UINT32 (NBBY * sizeof(uint32_t)) struct mv_pcib_softc { device_t sc_dev; struct rman sc_mem_rman; bus_addr_t sc_mem_base; bus_addr_t sc_mem_size; uint32_t sc_mem_map[MV_PCI_MEM_SLICE_SIZE / (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)]; int sc_win_target; int sc_mem_win_attr; struct rman sc_io_rman; bus_addr_t sc_io_base; bus_addr_t sc_io_size; uint32_t sc_io_map[MV_PCI_IO_SLICE_SIZE / (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)]; int sc_io_win_attr; struct resource *sc_res; bus_space_handle_t sc_bsh; bus_space_tag_t sc_bst; int sc_rid; struct mtx sc_msi_mtx; uint32_t sc_msi_bitmap; int sc_busnr; /* Host bridge bus number */ int sc_devnr; /* Host bridge device number */ int sc_type; int sc_mode; /* Endpoint / Root Complex */ int sc_msi_supported; int sc_skip_enable_procedure; int sc_enable_find_root_slot; struct ofw_bus_iinfo sc_pci_iinfo; int ap_segment; /* PCI domain */ }; /* Local forward prototypes */ static int mv_pcib_decode_win(phandle_t, struct mv_pcib_softc *); static void mv_pcib_hw_cfginit(void); static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, int); static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_init(struct mv_pcib_softc *, int, int); static int mv_pcib_init_all_bars(struct mv_pcib_softc *, int, int, int, int); static void mv_pcib_init_bridge(struct mv_pcib_softc *, int, int, int); static inline void pcib_write_irq_mask(struct mv_pcib_softc *, uint32_t); static void mv_pcib_enable(struct mv_pcib_softc *, uint32_t); static int mv_pcib_mem_init(struct mv_pcib_softc *); /* Forward prototypes */ static int mv_pcib_probe(device_t); static int mv_pcib_attach(device_t); static struct rman *mv_pcib_get_rman(device_t, int, u_int); static struct resource *mv_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int mv_pcib_adjust_resource(device_t, device_t, int, struct resource *, +static int mv_pcib_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int mv_pcib_release_resource(device_t, device_t, int, int, struct resource *); static int mv_pcib_activate_resource(device_t, device_t, int, int, struct resource *r); static int mv_pcib_deactivate_resource(device_t, device_t, int, int, struct resource *r); static int mv_pcib_map_resource(device_t, device_t, int, struct resource *, struct resource_map_request *, struct resource_map *); static int mv_pcib_unmap_resource(device_t, device_t, int, struct resource *, struct resource_map *); static int mv_pcib_read_ivar(device_t, device_t, int, uintptr_t *); static int mv_pcib_write_ivar(device_t, device_t, int, uintptr_t); static int mv_pcib_maxslots(device_t); static 
uint32_t mv_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int); static void mv_pcib_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_route_interrupt(device_t, device_t, int); static int mv_pcib_alloc_msi(device_t, device_t, int, int, int *); static int mv_pcib_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int mv_pcib_release_msi(device_t, device_t, int, int *); /* * Bus interface definitions. */ static device_method_t mv_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mv_pcib_probe), DEVMETHOD(device_attach, mv_pcib_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, mv_pcib_read_ivar), DEVMETHOD(bus_write_ivar, mv_pcib_write_ivar), DEVMETHOD(bus_get_rman, mv_pcib_get_rman), DEVMETHOD(bus_alloc_resource, mv_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, mv_pcib_adjust_resource), DEVMETHOD(bus_release_resource, mv_pcib_release_resource), DEVMETHOD(bus_activate_resource, mv_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, mv_pcib_deactivate_resource), DEVMETHOD(bus_map_resource, mv_pcib_map_resource), DEVMETHOD(bus_unmap_resource, mv_pcib_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, mv_pcib_maxslots), DEVMETHOD(pcib_read_config, mv_pcib_read_config), DEVMETHOD(pcib_write_config, mv_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mv_pcib_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD(pcib_alloc_msi, mv_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, mv_pcib_release_msi), DEVMETHOD(pcib_map_msi, mv_pcib_map_msi), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t mv_pcib_driver = { "pcib", mv_pcib_methods, sizeof(struct mv_pcib_softc), }; DRIVER_MODULE(mv_pcib, ofwbus, mv_pcib_driver, 0, 0); DRIVER_MODULE(mv_pcib, pcib_ctrl, mv_pcib_driver, 0, 0); static struct mtx pcicfg_mtx; static int mv_pcib_probe(device_t self) { phandle_t node; node = ofw_bus_get_node(self); if (!mv_fdt_is_type(node, "pci")) return (ENXIO); if (!(ofw_bus_is_compatible(self, "mrvl,pcie") || ofw_bus_is_compatible(self, "mrvl,pci") || ofw_bus_node_is_compatible( OF_parent(node), "marvell,armada-370-pcie"))) return (ENXIO); if (!ofw_bus_status_okay(self)) return (ENXIO); device_set_desc(self, "Marvell Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } static int mv_pcib_attach(device_t self) { struct mv_pcib_softc *sc; phandle_t node, parnode; uint32_t val, reg0; int err, bus, devfn, port_id; sc = device_get_softc(self); sc->sc_dev = self; node = ofw_bus_get_node(self); parnode = OF_parent(node); if (OF_getencprop(node, "marvell,pcie-port", &(port_id), sizeof(port_id)) <= 0) { /* If port ID does not exist in the FDT set value to 0 */ if (!OF_hasprop(node, "marvell,pcie-port")) port_id = 0; else return(ENXIO); } sc->ap_segment = port_id; if (ofw_bus_node_is_compatible(node, "mrvl,pcie")) { sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET(port_id); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR(port_id); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR(port_id); sc->sc_skip_enable_procedure = 1; } else if (ofw_bus_node_is_compatible(parnode, "marvell,armada-370-pcie")) { 
sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET_ARMADA38X(port_id); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR_ARMADA38X(port_id); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR_ARMADA38X(port_id); sc->sc_enable_find_root_slot = 1; } else if (ofw_bus_node_is_compatible(node, "mrvl,pci")) { sc->sc_type = MV_TYPE_PCI; sc->sc_win_target = MV_WIN_PCI_TARGET; sc->sc_mem_win_attr = MV_WIN_PCI_MEM_ATTR; sc->sc_io_win_attr = MV_WIN_PCI_IO_ATTR; } else return (ENXIO); /* * Retrieve our mem-mapped registers range. */ sc->sc_rid = 0; sc->sc_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(self, "could not map memory\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res); sc->sc_bsh = rman_get_bushandle(sc->sc_res); val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_CONTROL); sc->sc_mode = (val & PCIE_CONTROL_ROOT_CMPLX ? MV_MODE_ROOT : MV_MODE_ENDPOINT); /* * Get PCI interrupt info. */ if (sc->sc_mode == MV_MODE_ROOT) ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(pcell_t)); /* * Configure decode windows for PCI(E) access. */ if (mv_pcib_decode_win(node, sc) != 0) return (ENXIO); mv_pcib_hw_cfginit(); /* * Enable PCIE device. */ mv_pcib_enable(sc, port_id); /* * Memory management. */ err = mv_pcib_mem_init(sc); if (err) return (err); /* * Preliminary bus enumeration to find first linked devices and set * appropriate bus number from which should start the actual enumeration */ for (bus = 0; bus < PCI_BUSMAX; bus++) { for (devfn = 0; devfn < mv_pcib_maxslots(self); devfn++) { reg0 = mv_pcib_read_config(self, bus, devfn, devfn & 0x7, 0x0, 4); if (reg0 == (~0U)) continue; /* no device */ else { sc->sc_busnr = bus; /* update bus number */ break; } } } if (sc->sc_mode == MV_MODE_ROOT) { err = mv_pcib_init(sc, sc->sc_busnr, mv_pcib_maxslots(sc->sc_dev)); if (err) goto error; device_add_child(self, "pci", -1); } else { sc->sc_devnr = 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS, 1 << PCIE_STATUS_DEV_OFFS); device_add_child(self, "pci_ep", -1); } mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF); return (bus_generic_attach(self)); error: /* XXX SYS_RES_ should be released here */ rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static void mv_pcib_enable(struct mv_pcib_softc *sc, uint32_t unit) { uint32_t val; int timeout; if (sc->sc_skip_enable_procedure) goto pcib_enable_root_mode; /* * Check if PCIE device is enabled. */ if ((sc->sc_skip_enable_procedure == 0) && (read_cpu_ctrl(CPU_CONTROL) & CPU_CONTROL_PCIE_DISABLE(unit))) { write_cpu_ctrl(CPU_CONTROL, read_cpu_ctrl(CPU_CONTROL) & ~(CPU_CONTROL_PCIE_DISABLE(unit))); timeout = PCIE_LINK_TIMEOUT; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); while (((val & PCIE_STATUS_LINK_DOWN) == 1) && (timeout > 0)) { DELAY(1000); timeout -= 1000; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); } } pcib_enable_root_mode: if (sc->sc_mode == MV_MODE_ROOT) { /* * Enable PCI bridge. */ val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND); val |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND, val); } } static int mv_pcib_mem_init(struct mv_pcib_softc *sc) { int err; /* * Memory management. 
*/ sc->sc_mem_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_mem_rman); if (err) return (err); sc->sc_io_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_io_rman); if (err) { rman_fini(&sc->sc_mem_rman); return (err); } err = rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base, sc->sc_mem_base + sc->sc_mem_size - 1); if (err) goto error; err = rman_manage_region(&sc->sc_io_rman, sc->sc_io_base, sc->sc_io_base + sc->sc_io_size - 1); if (err) goto error; return (0); error: rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static inline uint32_t pcib_bit_get(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; return (map[n] & (1 << bit)); } static inline void pcib_bit_set(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; map[n] |= (1 << bit); } static inline uint32_t pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) if (pcib_bit_get(map, i)) return (0); return (1); } static inline void pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) pcib_bit_set(map, i); } /* * The idea of this allocator is taken from ARM No-Cache memory * management code (sys/arm/arm/vm_machdep.c). */ static bus_addr_t pcib_alloc(struct mv_pcib_softc *sc, uint32_t smask) { uint32_t bits, bits_limit, i, *map, min_alloc, size; bus_addr_t addr = 0; bus_addr_t base; if (smask & 1) { base = sc->sc_io_base; min_alloc = PCI_MIN_IO_ALLOC; bits_limit = sc->sc_io_size / min_alloc; map = sc->sc_io_map; smask &= ~0x3; } else { base = sc->sc_mem_base; min_alloc = PCI_MIN_MEM_ALLOC; bits_limit = sc->sc_mem_size / min_alloc; map = sc->sc_mem_map; smask &= ~0xF; } size = ~smask + 1; bits = size / min_alloc; for (i = 0; i + bits <= bits_limit; i += bits) if (pcib_map_check(map, i, bits)) { pcib_map_set(map, i, bits); addr = base + (i * min_alloc); return (addr); } return (addr); } static int mv_pcib_init_bar(struct mv_pcib_softc *sc, int bus, int slot, int func, int barno) { uint32_t addr, bar; int reg, width; reg = PCIR_BAR(barno); /* * Need to init the BAR register with 0xffffffff before correct * value can be read. */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, ~0, 4); bar = mv_pcib_read_config(sc->sc_dev, bus, slot, func, reg, 4); if (bar == 0) return (1); /* Calculate BAR size: 64 or 32 bit (in 32-bit units) */ width = ((bar & 7) == 4) ? 
2 : 1; addr = pcib_alloc(sc, bar); if (!addr) return (-1); if (bootverbose) printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n", bus, slot, func, reg, bar, addr); mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, addr, 4); if (width == 2) mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg + 4, 0, 4); return (width); } static void mv_pcib_init_bridge(struct mv_pcib_softc *sc, int bus, int slot, int func) { bus_addr_t io_base, mem_base; uint32_t io_limit, mem_limit; int secbus; io_base = sc->sc_io_base; io_limit = io_base + sc->sc_io_size - 1; mem_base = sc->sc_mem_base; mem_limit = mem_base + sc->sc_mem_size - 1; /* Configure I/O decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEL_1, io_base >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEH_1, io_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITL_1, io_limit >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITH_1, io_limit >> 16, 2); /* Configure memory decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMBASE_1, mem_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMLIMIT_1, mem_limit >> 16, 2); /* Disable memory prefetch decode */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEL_1, 0x10, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEH_1, 0x0, 4); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITL_1, 0xF, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITH_1, 0x0, 4); secbus = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SECBUS_1, 1); /* Configure buses behind the bridge */ mv_pcib_init(sc, secbus, PCI_SLOTMAX); } static int mv_pcib_init(struct mv_pcib_softc *sc, int bus, int maxslot) { int slot, func, maxfunc, error; uint8_t hdrtype, command, class, subclass; for (slot = 0; slot <= maxslot; slot++) { maxfunc = 0; for (func = 0; func <= maxfunc; func++) { hdrtype = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (func == 0 && (hdrtype & PCIM_MFDEV)) maxfunc = PCI_FUNCMAX; command = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); error = mv_pcib_init_all_bars(sc, bus, slot, func, hdrtype); if (error) return (error); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); /* Handle PCI-PCI bridges */ class = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_CLASS, 1); subclass = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SUBCLASS, 1); if (class != PCIC_BRIDGE || subclass != PCIS_BRIDGE_PCI) continue; mv_pcib_init_bridge(sc, bus, slot, func); } } /* Enable all ABCD interrupts */ pcib_write_irq_mask(sc, (0xF << 24)); return (0); } static int mv_pcib_init_all_bars(struct mv_pcib_softc *sc, int bus, int slot, int func, int hdrtype) { int maxbar, bar, i; maxbar = (hdrtype & PCIM_HDRTYPE) ? 
0 : 6; bar = 0; /* Program the base address registers */ while (bar < maxbar) { i = mv_pcib_init_bar(sc, bus, slot, func, bar); bar += i; if (i < 0) { device_printf(sc->sc_dev, "PCI IO/Memory space exhausted\n"); return (ENOMEM); } } return (0); } static struct rman * mv_pcib_get_rman(device_t dev, int type, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: return (&sc->sc_io_rman); case SYS_RES_MEMORY: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end, count, flags)); #endif default: return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags)); } if (RMAN_IS_DEFAULT_RANGE(start, end)) { start = sc->sc_mem_base; end = sc->sc_mem_base + sc->sc_mem_size - 1; count = sc->sc_mem_size; } if ((start < sc->sc_mem_base) || (start + count - 1 != end) || (end > sc->sc_mem_base + sc->sc_mem_size - 1)) return (NULL); return (bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int -mv_pcib_adjust_resource(device_t dev, device_t child, int type, +mv_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_adjust_resource(dev, child, type, r, - start, end)); + return (bus_generic_rman_adjust_resource(dev, child, r, start, + end)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->ap_segment, child, r, start, end)); #endif default: - return (bus_generic_adjust_resource(dev, child, type, r, - start, end)); + return (bus_generic_adjust_resource(dev, child, r, start, end)); } } static int mv_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(dev, child, type, rid, res)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_release_bus(sc->ap_segment, child, rid, res)); #endif default: return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, res)); } } static int mv_pcib_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(dev, child, type, rid, r)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_activate_bus(sc->ap_segment, child, rid, r)); #endif default: return (bus_generic_activate_resource(dev, child, type, rid, r)); } } static int mv_pcib_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(dev, child, type, rid, r)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return 
(pci_domain_deactivate_bus(sc->ap_segment, child, rid, r)); #endif default: return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } } static int mv_pcib_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_bustag = fdtbus_bs_tag; map->r_bushandle = start; map->r_size = length; return (0); } static int mv_pcib_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (0); default: return (EINVAL); } } static int mv_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->sc_busnr; return (0); case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); } return (ENOENT); } static int mv_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busnr = value; return (0); } return (ENOENT); } static inline void pcib_write_irq_mask(struct mv_pcib_softc *sc, uint32_t mask) { if (sc->sc_type != MV_TYPE_PCIE) return; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_IRQ_MASK, mask); } static void mv_pcib_hw_cfginit(void) { static int opened = 0; if (opened) return; mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); opened = 1; } static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t addr, data, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); data = ~0; switch (bytes) { case 1: data = bus_space_read_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3)); break; case 2: data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2))); break; case 4: data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh, cd)); break; } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { uint32_t addr, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? 
PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); switch (bytes) { case 1: bus_space_write_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3), data); break; case 2: bus_space_write_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2), htole16(data)); break; case 4: bus_space_write_4(sc->sc_bst, sc->sc_bsh, cd, htole32(data)); break; } mtx_unlock_spin(&pcicfg_mtx); } static int mv_pcib_maxslots(device_t dev) { struct mv_pcib_softc *sc = device_get_softc(dev); return ((sc->sc_type != MV_TYPE_PCI) ? 1 : PCI_SLOTMAX); } static int mv_pcib_root_slot(device_t dev, u_int bus, u_int slot, u_int func) { struct mv_pcib_softc *sc = device_get_softc(dev); uint32_t vendor, device; /* On platforms other than Armada38x, root link is always at slot 0 */ if (!sc->sc_enable_find_root_slot) return (slot == 0); vendor = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_VENDOR, PCIR_VENDOR_LENGTH); device = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_DEVICE, PCIR_DEVICE_LENGTH) & MV_DEV_FAMILY_MASK; return (vendor == PCI_VENDORID_MRVL && device == MV_DEV_ARMADA38X); } static uint32_t mv_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return ~0 if link is inactive or trying to read from Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return (~0U); return (mv_pcib_hw_cfgread(sc, bus, slot, func, reg, bytes)); } static void mv_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return if link is inactive or trying to write to Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return; mv_pcib_hw_cfgwrite(sc, bus, slot, func, reg, val, bytes); } static int mv_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct mv_pcib_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[4]; int icells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(®, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); icells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (icells > 0) return (ofw_bus_map_intr(dev, iparent, icells, mintr)); /* Maybe it's a real interrupt, not an intpin */ if (pin > 4) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int mv_pcib_decode_win(phandle_t node, struct mv_pcib_softc *sc) { struct mv_pci_range io_space, mem_space; device_t dev; int error; dev = sc->sc_dev; if ((error = mv_pci_ranges(node, &io_space, &mem_space)) != 0) { device_printf(dev, "could not retrieve 'ranges' data\n"); return (error); } /* Configure CPU decoding windows */ error = decode_win_cpu_set(sc->sc_win_target, sc->sc_io_win_attr, io_space.base_parent, io_space.len, ~0); if (error < 0) { device_printf(dev, "could not set up CPU decode " "window for PCI IO\n"); return (ENXIO); } error = 
decode_win_cpu_set(sc->sc_win_target, sc->sc_mem_win_attr, mem_space.base_parent, mem_space.len, mem_space.base_parent); if (error < 0) { device_printf(dev, "could not set up CPU decode " "windows for PCI MEM\n"); return (ENXIO); } sc->sc_io_base = io_space.base_parent; sc->sc_io_size = io_space.len; sc->sc_mem_base = mem_space.base_parent; sc->sc_mem_size = mem_space.len; return (0); } static int mv_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct mv_pcib_softc *sc; sc = device_get_softc(dev); if (!sc->sc_msi_supported) return (ENOTSUP); irq = irq - MSI_IRQ; /* validate parameters */ if (isclr(&sc->sc_msi_bitmap, irq)) { device_printf(dev, "invalid MSI 0x%x\n", irq); return (EINVAL); } mv_msi_data(irq, addr, data); debugf("%s: irq: %d addr: %jx data: %x\n", __func__, irq, *addr, *data); return (0); } static int mv_pcib_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused, int *irqs) { struct mv_pcib_softc *sc; u_int start = 0, i; sc = device_get_softc(dev); if (!sc->sc_msi_supported) return (ENOTSUP); if (powerof2(count) == 0 || count > MSI_IRQ_NUM) return (EINVAL); mtx_lock(&sc->sc_msi_mtx); for (start = 0; (start + count) < MSI_IRQ_NUM; start++) { for (i = start; i < start + count; i++) { if (isset(&sc->sc_msi_bitmap, i)) break; } if (i == start + count) break; } if ((start + count) == MSI_IRQ_NUM) { mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } for (i = start; i < start + count; i++) { setbit(&sc->sc_msi_bitmap, i); *irqs++ = MSI_IRQ + i; } debugf("%s: start: %x count: %x\n", __func__, start, count); mtx_unlock(&sc->sc_msi_mtx); return (0); } static int mv_pcib_release_msi(device_t dev, device_t child, int count, int *irqs) { struct mv_pcib_softc *sc; u_int i; sc = device_get_softc(dev); if(!sc->sc_msi_supported) return (ENOTSUP); mtx_lock(&sc->sc_msi_mtx); for (i = 0; i < count; i++) clrbit(&sc->sc_msi_bitmap, irqs[i] - MSI_IRQ); mtx_unlock(&sc->sc_msi_mtx); return (0); } diff --git a/sys/arm64/cavium/thunder_pcie_pem.c b/sys/arm64/cavium/thunder_pcie_pem.c index 5060b6e79d97..78fcf333d825 100644 --- a/sys/arm64/cavium/thunder_pcie_pem.c +++ b/sys/arm64/cavium/thunder_pcie_pem.c @@ -1,972 +1,972 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* PCIe external MAC root complex driver (PEM) for Cavium Thunder SOC */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #define THUNDER_PEM_DEVICE_ID 0xa020 #define THUNDER_PEM_VENDOR_ID 0x177d /* ThunderX specific defines */ #define THUNDER_PEMn_REG_BASE(unit) (0x87e0c0000000UL | ((unit) << 24)) #define PCIERC_CFG002 0x08 #define PCIERC_CFG006 0x18 #define PCIERC_CFG032 0x80 #define PCIERC_CFG006_SEC_BUS(reg) (((reg) >> 8) & 0xFF) #define PEM_CFG_RD_REG_ALIGN(reg) ((reg) & ~0x3) #define PEM_CFG_RD_REG_DATA(val) (((val) >> 32) & 0xFFFFFFFF) #define PEM_CFG_RD 0x30 #define PEM_CFG_LINK_MASK 0x3 #define PEM_CFG_LINK_RDY 0x3 #define PEM_CFG_SLIX_TO_REG(slix) ((slix) << 4) #define SBNUM_OFFSET 0x8 #define SBNUM_MASK 0xFF #define PEM_ON_REG 0x420 #define PEM_CTL_STATUS 0x0 #define PEM_LINK_ENABLE (1 << 4) #define PEM_LINK_DLLA (1 << 29) #define PEM_LINK_LT (1 << 27) #define PEM_BUS_SHIFT (24) #define PEM_SLOT_SHIFT (19) #define PEM_FUNC_SHIFT (16) #define SLIX_S2M_REGX_ACC 0x874001000000UL #define SLIX_S2M_REGX_ACC_SIZE 0x1000 #define SLIX_S2M_REGX_ACC_SPACING 0x001000000000UL #define SLI_BASE 0x880000000000UL #define SLI_WINDOW_SPACING 0x004000000000UL #define SLI_PCI_OFFSET 0x001000000000UL #define SLI_NODE_SHIFT (44) #define SLI_NODE_MASK (3) #define SLI_GROUP_SHIFT (40) #define SLI_ID_SHIFT (24) #define SLI_ID_MASK (7) #define SLI_PEMS_PER_GROUP (3) #define SLI_GROUPS_PER_NODE (2) #define SLI_PEMS_PER_NODE (SLI_PEMS_PER_GROUP * SLI_GROUPS_PER_NODE) #define SLI_ACC_REG_CNT (256) /* * Each PEM device creates its own bus with * own address translation, so we can adjust bus addresses * as we want. 
To support 32-bit cards let's assume * PCI window assignment looks as following: * * 0x00000000 - 0x000FFFFF IO * 0x00100000 - 0xFFFFFFFF Memory */ #define PCI_IO_BASE 0x00000000UL #define PCI_IO_SIZE 0x00100000UL #define PCI_MEMORY_BASE PCI_IO_SIZE #define PCI_MEMORY_SIZE 0xFFF00000UL #define RID_PEM_SPACE 1 static int thunder_pem_activate_resource(device_t, device_t, int, int, struct resource *); -static int thunder_pem_adjust_resource(device_t, device_t, int, +static int thunder_pem_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static struct resource * thunder_pem_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int thunder_pem_alloc_msi(device_t, device_t, int, int, int *); static int thunder_pem_release_msi(device_t, device_t, int, int *); static int thunder_pem_alloc_msix(device_t, device_t, int *); static int thunder_pem_release_msix(device_t, device_t, int); static int thunder_pem_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int thunder_pem_get_id(device_t, device_t, enum pci_id_type, uintptr_t *); static int thunder_pem_attach(device_t); static int thunder_pem_deactivate_resource(device_t, device_t, int, int, struct resource *); static int thunder_pem_map_resource(device_t, device_t, int, struct resource *, struct resource_map_request *, struct resource_map *); static int thunder_pem_unmap_resource(device_t, device_t, int, struct resource *, struct resource_map *); static bus_dma_tag_t thunder_pem_get_dma_tag(device_t, device_t); static int thunder_pem_detach(device_t); static uint64_t thunder_pem_config_reg_read(struct thunder_pem_softc *, int); static int thunder_pem_link_init(struct thunder_pem_softc *); static int thunder_pem_maxslots(device_t); static int thunder_pem_probe(device_t); static uint32_t thunder_pem_read_config(device_t, u_int, u_int, u_int, u_int, int); static int thunder_pem_read_ivar(device_t, device_t, int, uintptr_t *); static void thunder_pem_release_all(device_t); static int thunder_pem_release_resource(device_t, device_t, int, int, struct resource *); static struct rman * thunder_pem_get_rman(device_t, int, u_int); static void thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *, int, int); static void thunder_pem_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int thunder_pem_write_ivar(device_t, device_t, int, uintptr_t); /* Global handlers for SLI interface */ static bus_space_handle_t sli0_s2m_regx_base = 0; static bus_space_handle_t sli1_s2m_regx_base = 0; static device_method_t thunder_pem_methods[] = { /* Device interface */ DEVMETHOD(device_probe, thunder_pem_probe), DEVMETHOD(device_attach, thunder_pem_attach), DEVMETHOD(device_detach, thunder_pem_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, thunder_pem_read_ivar), DEVMETHOD(bus_write_ivar, thunder_pem_write_ivar), DEVMETHOD(bus_get_rman, thunder_pem_get_rman), DEVMETHOD(bus_alloc_resource, thunder_pem_alloc_resource), DEVMETHOD(bus_release_resource, thunder_pem_release_resource), DEVMETHOD(bus_adjust_resource, thunder_pem_adjust_resource), DEVMETHOD(bus_activate_resource, thunder_pem_activate_resource), DEVMETHOD(bus_deactivate_resource, thunder_pem_deactivate_resource), DEVMETHOD(bus_map_resource, thunder_pem_map_resource), DEVMETHOD(bus_unmap_resource, thunder_pem_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_dma_tag, thunder_pem_get_dma_tag), /* pcib 
interface */ DEVMETHOD(pcib_maxslots, thunder_pem_maxslots), DEVMETHOD(pcib_read_config, thunder_pem_read_config), DEVMETHOD(pcib_write_config, thunder_pem_write_config), DEVMETHOD(pcib_alloc_msix, thunder_pem_alloc_msix), DEVMETHOD(pcib_release_msix, thunder_pem_release_msix), DEVMETHOD(pcib_alloc_msi, thunder_pem_alloc_msi), DEVMETHOD(pcib_release_msi, thunder_pem_release_msi), DEVMETHOD(pcib_map_msi, thunder_pem_map_msi), DEVMETHOD(pcib_get_id, thunder_pem_get_id), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, thunder_pem_driver, thunder_pem_methods, sizeof(struct thunder_pem_softc)); extern struct bus_space memmap_bus; DRIVER_MODULE(thunder_pem, pci, thunder_pem_driver, 0, 0); MODULE_DEPEND(thunder_pem, pci, 1, 1, 1); static int thunder_pem_maxslots(device_t dev) { #if 0 /* max slots per bus acc. to standard */ return (PCI_SLOTMAX); #else /* * ARM64TODO Workaround - otherwise an em(4) interface appears to be * present on every PCI function on the bus to which it is connected */ return (0); #endif } static int thunder_pem_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct thunder_pem_softc *sc; int secondary_bus = 0; sc = device_get_softc(dev); if (index == PCIB_IVAR_BUS) { secondary_bus = thunder_pem_config_reg_read(sc, PCIERC_CFG006); *result = PCIERC_CFG006_SEC_BUS(secondary_bus); return (0); } if (index == PCIB_IVAR_DOMAIN) { *result = sc->id; return (0); } return (ENOENT); } static int thunder_pem_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } static int thunder_pem_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_activate_bus(sc->id, child, rid, r)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_activate_resource(dev, child, type, rid, r)); default: return (bus_generic_activate_resource(dev, child, type, rid, r)); } } static int thunder_pem_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_deactivate_bus(sc->id, child, rid, r)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_deactivate_resource(dev, child, type, rid, r)); default: return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } } static int thunder_pem_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct thunder_pem_softc *sc; rman_res_t length, start; int error; /* Resources must be active to be mapped. 
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sc = device_get_softc(dev); start = range_addr_pci_to_phys(sc->ranges, start); error = bus_space_map(&memmap_bus, start, length, 0, &map->r_bushandle); if (error) return (error); map->r_bustag = &memmap_bus; map->r_vaddr = (void *)map->r_bushandle; map->r_size = length; return (0); } static int thunder_pem_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: bus_space_unmap(map->r_bustag, map->r_bushandle, map->r_size); return (0); default: return (EINVAL); } } static int -thunder_pem_adjust_resource(device_t dev, device_t child, int type, - struct resource *res, rman_res_t start, rman_res_t end) +thunder_pem_adjust_resource(device_t dev, device_t child, struct resource *res, + rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->id, child, res, start, end)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_adjust_resource(dev, child, type, res, - start, end)); + return (bus_generic_rman_adjust_resource(dev, child, res, start, + end)); default: - return (bus_generic_adjust_resource(dev, child, type, res, - start, end)); + return (bus_generic_adjust_resource(dev, child, res, start, + end)); } } static bus_dma_tag_t thunder_pem_get_dma_tag(device_t dev, device_t child) { struct thunder_pem_softc *sc; sc = device_get_softc(dev); return (sc->dmat); } static int thunder_pem_alloc_msi(device_t pci, device_t child, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pci); return (PCIB_ALLOC_MSI(device_get_parent(bus), child, count, maxcount, irqs)); } static int thunder_pem_release_msi(device_t pci, device_t child, int count, int *irqs) { device_t bus; bus = device_get_parent(pci); return (PCIB_RELEASE_MSI(device_get_parent(bus), child, count, irqs)); } static int thunder_pem_alloc_msix(device_t pci, device_t child, int *irq) { device_t bus; bus = device_get_parent(pci); return (PCIB_ALLOC_MSIX(device_get_parent(bus), child, irq)); } static int thunder_pem_release_msix(device_t pci, device_t child, int irq) { device_t bus; bus = device_get_parent(pci); return (PCIB_RELEASE_MSIX(device_get_parent(bus), child, irq)); } static int thunder_pem_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, uint32_t *data) { device_t bus; bus = device_get_parent(pci); return (PCIB_MAP_MSI(device_get_parent(bus), child, irq, addr, data)); } static int thunder_pem_get_id(device_t pci, device_t child, enum pci_id_type type, uintptr_t *id) { int bsf; int pem; if (type != PCI_ID_MSI) return (pcib_get_id(pci, child, type, id)); bsf = pci_get_rid(child); /* PEM (PCIe MAC/root complex) number is equal to domain */ pem = pci_get_domain(child); /* * Set appropriate device ID (passed by the HW along with * the transaction to memory) for different root complex * numbers using hard-coded domain portion for each group. 
*/ if (pem < 3) *id = (0x1 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 6) *id = (0x3 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 9) *id = (0x9 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 12) *id = (0xB << PCI_RID_DOMAIN_SHIFT) | bsf; else return (ENXIO); return (0); } static int thunder_pem_identify(device_t dev) { struct thunder_pem_softc *sc; rman_res_t start; sc = device_get_softc(dev); start = rman_get_start(sc->reg); /* Calculate PEM designations from its address */ sc->node = (start >> SLI_NODE_SHIFT) & SLI_NODE_MASK; sc->id = ((start >> SLI_ID_SHIFT) & SLI_ID_MASK) + (SLI_PEMS_PER_NODE * sc->node); sc->sli = sc->id % SLI_PEMS_PER_GROUP; sc->sli_group = (sc->id / SLI_PEMS_PER_GROUP) % SLI_GROUPS_PER_NODE; sc->sli_window_base = SLI_BASE | (((uint64_t)sc->node) << SLI_NODE_SHIFT) | ((uint64_t)sc->sli_group << SLI_GROUP_SHIFT); sc->sli_window_base += SLI_WINDOW_SPACING * sc->sli; return (0); } static void thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *sc, int sli_group, int slix) { uint64_t regval; bus_space_handle_t handle = 0; KASSERT(slix >= 0 && slix <= SLI_ACC_REG_CNT, ("Invalid SLI index")); if (sli_group == 0) handle = sli0_s2m_regx_base; else if (sli_group == 1) handle = sli1_s2m_regx_base; else device_printf(sc->dev, "SLI group is not correct\n"); if (handle) { /* Clear lower 32-bits of the SLIx register */ regval = bus_space_read_8(sc->reg_bst, handle, PEM_CFG_SLIX_TO_REG(slix)); regval &= ~(0xFFFFFFFFUL); bus_space_write_8(sc->reg_bst, handle, PEM_CFG_SLIX_TO_REG(slix), regval); } } static int thunder_pem_link_init(struct thunder_pem_softc *sc) { uint64_t regval; /* check whether PEM is safe to access. */ regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_ON_REG); if ((regval & PEM_CFG_LINK_MASK) != PEM_CFG_LINK_RDY) { device_printf(sc->dev, "PEM%d is not ON\n", sc->id); return (ENXIO); } regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS); regval |= PEM_LINK_ENABLE; bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS, regval); /* Wait 1ms as per Cavium specification */ DELAY(1000); regval = thunder_pem_config_reg_read(sc, PCIERC_CFG032); if (((regval & PEM_LINK_DLLA) == 0) || ((regval & PEM_LINK_LT) != 0)) { device_printf(sc->dev, "PCIe RC: Port %d Link Timeout\n", sc->id); return (ENXIO); } return (0); } static int thunder_pem_init(struct thunder_pem_softc *sc) { int i, retval = 0; retval = thunder_pem_link_init(sc); if (retval) { device_printf(sc->dev, "%s failed\n", __func__); return retval; } /* To support 32-bit PCIe devices, set S2M_REGx_ACC[BA]=0x0 */ for (i = 0; i < SLI_ACC_REG_CNT; i++) { thunder_pem_slix_s2m_regx_acc_modify(sc, sc->sli_group, i); } return (retval); } static uint64_t thunder_pem_config_reg_read(struct thunder_pem_softc *sc, int reg) { uint64_t data; /* Write to ADDR register */ bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, PEM_CFG_RD_REG_ALIGN(reg)); bus_space_barrier(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, 8, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Read from DATA register */ data = PEM_CFG_RD_REG_DATA(bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD)); return (data); } static uint32_t thunder_pem_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint64_t offset; uint32_t data; struct thunder_pem_softc *sc; bus_space_tag_t t; bus_space_handle_t h; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return (~0U); sc = device_get_softc(dev); /* Calculate offset */ offset = (bus << PEM_BUS_SHIFT) 
| (slot << PEM_SLOT_SHIFT) | (func << PEM_FUNC_SHIFT); t = sc->reg_bst; h = sc->pem_sli_base; bus_space_map(sc->reg_bst, sc->sli_window_base + offset, PCIE_REGMAX, 0, &h); switch (bytes) { case 1: data = bus_space_read_1(t, h, reg); break; case 2: data = le16toh(bus_space_read_2(t, h, reg)); break; case 4: data = le32toh(bus_space_read_4(t, h, reg)); break; default: data = ~0U; break; } bus_space_unmap(sc->reg_bst, h, PCIE_REGMAX); return (data); } static void thunder_pem_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { uint64_t offset; struct thunder_pem_softc *sc; bus_space_tag_t t; bus_space_handle_t h; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return; sc = device_get_softc(dev); /* Calculate offset */ offset = (bus << PEM_BUS_SHIFT) | (slot << PEM_SLOT_SHIFT) | (func << PEM_FUNC_SHIFT); t = sc->reg_bst; h = sc->pem_sli_base; bus_space_map(sc->reg_bst, sc->sli_window_base + offset, PCIE_REGMAX, 0, &h); switch (bytes) { case 1: bus_space_write_1(t, h, reg, val); break; case 2: bus_space_write_2(t, h, reg, htole16(val)); break; case 4: bus_space_write_4(t, h, reg, htole32(val)); break; default: break; } bus_space_unmap(sc->reg_bst, h, PCIE_REGMAX); } static struct resource * thunder_pem_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct thunder_pem_softc *sc = device_get_softc(dev); struct resource *res; device_t parent_dev; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->id, child, rid, start, end, count, flags)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: /* Find parent device. On ThunderX we know an exact path. */ parent_dev = device_get_parent(device_get_parent(dev)); return (BUS_ALLOC_RESOURCE(parent_dev, dev, type, rid, start, end, count, flags)); } if (!RMAN_IS_DEFAULT_RANGE(start, end)) { /* * We might get PHYS addresses here inherited from EFI. * Convert to PCI if necessary. */ if (range_addr_is_phys(sc->ranges, start, count)) { start = range_addr_phys_to_pci(sc->ranges, start); end = start + count - 1; } } if (bootverbose) { device_printf(dev, "thunder_pem_alloc_resource: start=%#lx, end=%#lx, count=%#lx\n", start, end, count); } res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (res == NULL && bootverbose) { device_printf(dev, "%s FAIL: type=%d, rid=%d, " "start=%016lx, end=%016lx, count=%016lx, flags=%x\n", __func__, type, *rid, start, end, count, flags); } return (res); } static int thunder_pem_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { device_t parent_dev; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_release_bus(sc->id, child, rid, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_release_resource(dev, child, type, rid, res)); default: /* Find parent device. On ThunderX we know an exact path. 
*/ parent_dev = device_get_parent(device_get_parent(dev)); return (BUS_RELEASE_RESOURCE(parent_dev, child, type, rid, res)); } } static struct rman * thunder_pem_get_rman(device_t bus, int type, u_int flags) { struct thunder_pem_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_IOPORT: return (&sc->io_rman); case SYS_RES_MEMORY: return (&sc->mem_rman); default: break; } return (NULL); } static int thunder_pem_probe(device_t dev) { uint16_t pci_vendor_id; uint16_t pci_device_id; pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); if ((pci_vendor_id == THUNDER_PEM_VENDOR_ID) && (pci_device_id == THUNDER_PEM_DEVICE_ID)) { device_set_desc_copy(dev, THUNDER_PEM_DESC); return (0); } return (ENXIO); } static int thunder_pem_attach(device_t dev) { struct resource_map_request req; struct resource_map map; devclass_t pci_class; device_t parent; struct thunder_pem_softc *sc; int error; int rid; int tuple; uint64_t base, size; struct rman *rman; sc = device_get_softc(dev); sc->dev = dev; /* Allocate memory for resource */ pci_class = devclass_find("pci"); parent = device_get_parent(dev); if (device_get_devclass(parent) == pci_class) rid = PCIR_BAR(0); else rid = RID_PEM_SPACE; sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_UNMAPPED); if (sc->reg == NULL) { device_printf(dev, "Failed to allocate resource\n"); return (ENXIO); } resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE_NP; error = bus_map_resource(dev, SYS_RES_MEMORY, sc->reg, &req, &map); if (error != 0) { device_printf(dev, "could not map memory.\n"); return (error); } rman_set_mapping(sc->reg, &map); sc->reg_bst = rman_get_bustag(sc->reg); sc->reg_bsh = rman_get_bushandle(sc->reg); /* Create the parent DMA tag to pass down the coherent flag */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ BUS_DMA_COHERENT, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (error != 0) return (error); /* Map SLI, do it only once */ if (!sli0_s2m_regx_base) { bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC, SLIX_S2M_REGX_ACC_SIZE, 0, &sli0_s2m_regx_base); } if (!sli1_s2m_regx_base) { bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC + SLIX_S2M_REGX_ACC_SPACING, SLIX_S2M_REGX_ACC_SIZE, 0, &sli1_s2m_regx_base); } if ((sli0_s2m_regx_base == 0) || (sli1_s2m_regx_base == 0)) { device_printf(dev, "bus_space_map failed to map slix_s2m_regx_base\n"); goto fail; } /* Identify PEM */ if (thunder_pem_identify(dev) != 0) goto fail; /* Initialize rman and allocate regions */ sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "PEM PCIe Memory"; error = rman_init(&sc->mem_rman); if (error != 0) { device_printf(dev, "memory rman_init() failed. error = %d\n", error); goto fail; } sc->io_rman.rm_type = RMAN_ARRAY; sc->io_rman.rm_descr = "PEM PCIe IO"; error = rman_init(&sc->io_rman); if (error != 0) { device_printf(dev, "IO rman_init() failed. error = %d\n", error); goto fail_mem; } /* * We ignore the values that may have been provided in FDT * and configure ranges according to the below formula * for all types of devices. This is because some DTBs provided * by EFI do not have proper ranges property or don't have them * at all. 
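 * With this scheme, a PCI bus address maps linearly to the CPU address
 * sc->sli_window_base + SLI_PCI_OFFSET + pci_addr; the ranges[] entries
 * filled in below encode exactly that translation.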
*/ /* Fill memory window */ sc->ranges[0].pci_base = PCI_MEMORY_BASE; sc->ranges[0].size = PCI_MEMORY_SIZE; sc->ranges[0].phys_base = sc->sli_window_base + SLI_PCI_OFFSET + sc->ranges[0].pci_base; sc->ranges[0].flags = SYS_RES_MEMORY; /* Fill IO window */ sc->ranges[1].pci_base = PCI_IO_BASE; sc->ranges[1].size = PCI_IO_SIZE; sc->ranges[1].phys_base = sc->sli_window_base + SLI_PCI_OFFSET + sc->ranges[1].pci_base; sc->ranges[1].flags = SYS_RES_IOPORT; for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { base = sc->ranges[tuple].pci_base; size = sc->ranges[tuple].size; if (size == 0) continue; /* empty range element */ rman = thunder_pem_get_rman(dev, sc->ranges[tuple].flags, 0); if (rman != NULL) error = rman_manage_region(rman, base, base + size - 1); else error = EINVAL; if (error) { device_printf(dev, "rman_manage_region() failed. error = %d\n", error); rman_fini(&sc->mem_rman); return (error); } if (bootverbose) { device_printf(dev, "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Flags:0x%jx\n", sc->ranges[tuple].pci_base, sc->ranges[tuple].phys_base, sc->ranges[tuple].size, sc->ranges[tuple].flags); } } if (thunder_pem_init(sc)) { device_printf(dev, "Failure during PEM init\n"); goto fail_io; } device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); fail_io: rman_fini(&sc->io_rman); fail_mem: rman_fini(&sc->mem_rman); fail: bus_free_resource(dev, SYS_RES_MEMORY, sc->reg); return (ENXIO); } static void thunder_pem_release_all(device_t dev) { struct thunder_pem_softc *sc; sc = device_get_softc(dev); rman_fini(&sc->io_rman); rman_fini(&sc->mem_rman); if (sc->reg != NULL) bus_free_resource(dev, SYS_RES_MEMORY, sc->reg); } static int thunder_pem_detach(device_t dev) { thunder_pem_release_all(dev); return (0); } diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index 0ea9a32b1589..93bce7df70d5 100644 --- a/sys/dev/acpica/acpi.c +++ b/sys/dev/acpica/acpi.c @@ -1,4655 +1,4655 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; struct acpi_interface { ACPI_STRING *data; int num; }; static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; /* Global mutex for locking access to the ACPI subsystem. */ struct mtx acpi_mutex; struct callout acpi_sleep_timer; /* Bitmap of device quirks. */ int acpi_quirks; /* Supported sleep states. */ static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; static void acpi_lookup(void *arg, const char *name, device_t *dev); static int acpi_modevent(struct module *mod, int event, void *junk); static device_probe_t acpi_probe; static device_attach_t acpi_attach; static device_suspend_t acpi_suspend; static device_resume_t acpi_resume; static device_shutdown_t acpi_shutdown; static bus_add_child_t acpi_add_child; static bus_print_child_t acpi_print_child; static bus_probe_nomatch_t acpi_probe_nomatch; static bus_driver_added_t acpi_driver_added; static bus_child_deleted_t acpi_child_deleted; static bus_read_ivar_t acpi_read_ivar; static bus_write_ivar_t acpi_write_ivar; static bus_get_resource_list_t acpi_get_rlist; static bus_get_rman_t acpi_get_rman; static bus_set_resource_t acpi_set_resource; static bus_alloc_resource_t acpi_alloc_resource; static bus_adjust_resource_t acpi_adjust_resource; static bus_release_resource_t acpi_release_resource; static bus_delete_resource_t acpi_delete_resource; static bus_activate_resource_t acpi_activate_resource; static bus_deactivate_resource_t acpi_deactivate_resource; static bus_map_resource_t acpi_map_resource; static bus_unmap_resource_t acpi_unmap_resource; static bus_child_pnpinfo_t acpi_child_pnpinfo_method; static bus_child_location_t acpi_child_location_method; static bus_hint_device_unit_t acpi_hint_device_unit; static bus_get_property_t acpi_bus_get_prop; static bus_get_device_path_t acpi_get_device_path; static acpi_id_probe_t acpi_device_id_probe; static acpi_evaluate_object_t acpi_device_eval_obj; static acpi_get_property_t acpi_device_get_prop; static acpi_scan_children_t acpi_device_scan_children; static isa_pnp_probe_t acpi_isa_pnp_probe; static void acpi_reserve_resources(device_t dev); static int acpi_sysres_alloc(device_t dev); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *context, void **retval); static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad); static void acpi_platform_osc(device_t dev); static void acpi_probe_children(device_t bus); static void acpi_probe_order(ACPI_HANDLE handle, int *order); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_sleep_enable(void *arg); static ACPI_STATUS acpi_sleep_disable(struct 
acpi_softc *sc); static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_resync_clock(struct acpi_softc *sc); static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_prep_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sname2sstate(const char *sname); static const char *acpi_sstate2sname(int sstate); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); static void acpi_enable_pcie(void); static void acpi_reset_interfaces(device_t dev); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, acpi_shutdown), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_suspend, acpi_suspend), DEVMETHOD(device_resume, acpi_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_driver_added), DEVMETHOD(bus_child_deleted, acpi_child_deleted), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_get_resource_list, acpi_get_rlist), DEVMETHOD(bus_get_rman, acpi_get_rman), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_delete_resource, acpi_delete_resource), DEVMETHOD(bus_activate_resource, acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_deactivate_resource), DEVMETHOD(bus_map_resource, acpi_map_resource), DEVMETHOD(bus_unmap_resource, acpi_unmap_resource), DEVMETHOD(bus_child_pnpinfo, acpi_child_pnpinfo_method), DEVMETHOD(bus_child_location, acpi_child_location_method), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_domain, acpi_get_domain), DEVMETHOD(bus_get_property, acpi_bus_get_prop), DEVMETHOD(bus_get_device_path, acpi_get_device_path), /* ACPI bus */ DEVMETHOD(acpi_id_probe, acpi_device_id_probe), DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), DEVMETHOD(acpi_get_property, acpi_device_get_prop), DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), DEVMETHOD(acpi_scan_children, acpi_device_scan_children), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), DEVMETHOD_END }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_modevent, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(acpi, 1); ACPI_SERIAL_DECL(acpi, "ACPI root bus"); /* Local pools for managing system resources for ACPI child devices. 
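 * acpi_rman_io backs SYS_RES_IOPORT requests and acpi_rman_mem backs
 * SYS_RES_MEMORY requests (see acpi_get_rman() below); both pools are
 * populated from the PNP0C01/PNP0C02 system resource devices in
 * acpi_sysres_alloc().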
*/ static struct rman acpi_rman_io, acpi_rman_mem; #define ACPI_MINIMUM_AWAKETIME 5 /* Holds the description of the acpi0 device. */ static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow overriding _OSI methods. */ static char acpi_install_interface[256]; TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, sizeof(acpi_install_interface)); static char acpi_remove_interface[256]; TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, sizeof(acpi_remove_interface)); /* Allow users to dump Debug objects without ACPI debugger. */ static int acpi_debug_objects; TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0, acpi_debug_objects_sysctl, "I", "Enable Debug objects"); /* Allow the interpreter to ignore common mistakes in BIOS. */ static int acpi_interpreter_slack = 1; TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); /* Ignore register widths set by FADT and use default widths instead. */ static int acpi_ignore_reg_width = 1; TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); /* Allow users to override quirks. */ TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); int acpi_susp_bounce; SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); /* * ACPI standard UUID for Device Specific Data Package * "Device Properties UUID for _DSD" Rev. 2.0 */ static const struct uuid acpi_dsd_uuid = { 0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, { 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 } }; /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and can be fatal to the system. * It also cannot be unloaded, since the entire system bus hierarchy hangs * off it. */ static int acpi_modevent(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: if (!cold) { printf("The ACPI driver cannot be loaded after boot.\n"); return (EPERM); } break; case MOD_UNLOAD: if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) return (EBUSY); break; default: break; } return (0); } /* * Perform early initialization. */ ACPI_STATUS acpi_Startup(void) { static int started = 0; ACPI_STATUS status; int val; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Only run the startup code once. The MADT driver also calls this. */ if (started) return_VALUE (AE_OK); started = 1; /* * Initialize the ACPICA subsystem. */ if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) { printf("ACPI: Could not initialize Subsystem: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing * if more tables exist. */ if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { printf("ACPI: Table initialisation failed: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* Set up any quirks we have for this system. 
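 * The defaults come from acpi_table_quirks(); the user may override the
 * result via the debug.acpi.quirks tunable declared above.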
*/ if (acpi_quirks == ACPI_Q_OK) acpi_table_quirks(&acpi_quirks); /* If the user manually set the disabled hint to 0, force-enable ACPI. */ if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) acpi_quirks &= ~ACPI_Q_BROKEN; if (acpi_quirks & ACPI_Q_BROKEN) { printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); status = AE_SUPPORT; } return_VALUE (status); } /* * Detect ACPI and perform early initialisation. */ int acpi_identify(void) { ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_HEADER *rsdt; ACPI_PHYSICAL_ADDRESS paddr; struct sbuf sb; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (!cold) return (ENXIO); /* Check that we haven't been disabled with a hint. */ if (resource_disabled("acpi", 0)) return (ENXIO); /* Check for other PM systems. */ if (power_pm_get_type() != POWER_PM_TYPE_NONE && power_pm_get_type() != POWER_PM_TYPE_ACPI) { printf("ACPI identify failed, other PM system enabled.\n"); return (ENXIO); } /* Initialize root tables. */ if (ACPI_FAILURE(acpi_Startup())) { printf("ACPI: Try disabling either ACPI or apic support.\n"); return (ENXIO); } if ((paddr = AcpiOsGetRootPointer()) == 0 || (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) return (ENXIO); if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; else paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) return (ENXIO); sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); sbuf_trim(&sb); sbuf_putc(&sb, ' '); sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); sbuf_trim(&sb); sbuf_finish(&sb); sbuf_delete(&sb); AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); return (0); } /* * Fetch some descriptive data from ACPI to put in our attach message. */ static int acpi_probe(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); device_set_desc(dev, acpi_desc); return_VALUE (BUS_PROBE_NOWILDCARD); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->acpi_dev = dev; callout_init(&sc->susp_force_to, 1); error = ENXIO; /* Initialize resource manager. */ acpi_rman_io.rm_type = RMAN_ARRAY; acpi_rman_io.rm_start = 0; acpi_rman_io.rm_end = 0xffff; acpi_rman_io.rm_descr = "ACPI I/O ports"; if (rman_init(&acpi_rman_io) != 0) panic("acpi rman_init IO ports failed"); acpi_rman_mem.rm_type = RMAN_ARRAY; acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; if (rman_init(&acpi_rman_mem) != 0) panic("acpi rman_init memory failed"); resource_list_init(&sc->sysres_rl); /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); /* * Set the globals from our tunables. This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE; #ifndef ACPI_DEBUG /* * Disable all debugging layers and levels. */ AcpiDbgLayer = 0; AcpiDbgLevel = 0; #endif /* Override OS interfaces if the user requested. 
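 * This honors the hw.acpi.install_interface and hw.acpi.remove_interface
 * tunables declared above.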
*/ acpi_reset_interfaces(dev); /* Load ACPI name space. */ status = AcpiLoadTables(); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not load Namespace: %s\n", AcpiFormatException(status)); goto out; } /* Handle MCFG table if present. */ acpi_enable_pcie(); /* * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT. * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here. */ flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; /* Bring the hardware and basic handlers online. */ if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. * * XXX This happens before the sysresource devices have been probed and * attached so its resources come from nexus0. In practice, this isn't * a problem but should be addressed eventually. */ acpi_ec_ecdt_probe(dev); /* Bring device objects and regions online. */ if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Set up our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, acpi_supported_sleep_state_sysctl, "A", "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "Lid ACPI sleep state.
Set to S3 if you want to suspend your laptop when you close the lid."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, "sleep delay in seconds"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "disable_on_reboot", CTLFLAG_RW, &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "handle_reboot", CTLFLAG_RW, &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); /* * Default to 1 second before sleeping to give some machines time to * stabilize. */ sc->acpi_sleep_delay = 1; if (bootverbose) sc->acpi_verbose = 1; if ((env = kern_getenv("hw.acpi.verbose")) != NULL) { if (strcmp(env, "0") != 0) sc->acpi_verbose = 1; freeenv(env); } /* Only enable reboot by default if the FADT says it is available. */ if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) sc->acpi_handle_reboot = 1; #if !ACPI_REDUCED_HARDWARE /* Only enable S4BIOS by default if the FACS says it is available. */ if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) sc->acpi_s4bios = 1; #endif /* Probe all supported sleep states. */ acpi_sleep_states[ACPI_STATE_S0] = TRUE; for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT, __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) && ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) acpi_sleep_states[state] = TRUE; /* * Dispatch the default sleep state to devices. The lid switch is set * to UNKNOWN by default to avoid surprising users. */ sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ? ACPI_STATE_S5 : ACPI_STATE_UNKNOWN; sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN; sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ? ACPI_STATE_S1 : ACPI_STATE_UNKNOWN; sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ? ACPI_STATE_S3 : ACPI_STATE_UNKNOWN; /* Pick the first valid sleep state for the sleep button default. */ sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN; for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) if (acpi_sleep_states[state]) { sc->acpi_sleep_button_sx = state; break; } acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children. */ /* Register our shutdown handler. */ EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST + 150); /* * Register our acpi event handlers. * XXX should be configurable e.g. via userland policy manager. */ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* Flag our initial states.
*/ sc->acpi_enabled = TRUE; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = TRUE; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, "acpi"); sc->acpi_dev_t->si_drv1 = sc; if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); acpi_platform_osc(dev); if (!acpi_disabled("bus")) { EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); acpi_probe_children(dev); } /* Update all GPEs and enable runtime GPEs. */ status = AcpiUpdateAllGpes(); if (ACPI_FAILURE(status)) device_printf(dev, "Could not update all GPEs: %s\n", AcpiFormatException(status)); /* Allow sleep request after a while. */ callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, acpi_sleep_enable, sc); error = 0; out: return_VALUE (error); } static void acpi_set_power_children(device_t dev, int state) { device_t child; device_t *devlist; int dstate, i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; /* * Retrieve and set D-state for the sleep state if _SxD is present. * Skip children who aren't attached since they are handled separately. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; dstate = state; if (device_is_attached(child) && acpi_device_pwr_for_sleep(dev, child, &dstate) == 0) acpi_set_powerstate(child, dstate); } free(devlist, M_TEMP); } static int acpi_suspend(device_t dev) { int error; bus_topo_assert(); error = bus_generic_suspend(dev); if (error == 0) acpi_set_power_children(dev, ACPI_STATE_D3); return (error); } static int acpi_resume(device_t dev) { bus_topo_assert(); acpi_set_power_children(dev, ACPI_STATE_D0); return (bus_generic_resume(dev)); } static int acpi_shutdown(device_t dev) { bus_topo_assert(); /* Allow children to shutdown first. */ bus_generic_shutdown(dev); /* * Enable any GPEs that are able to power-on the system (i.e., RTC). * Also, disable any that are not valid for this state (most). */ acpi_wake_prep_walk(ACPI_STATE_S5); return (0); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) return (NULL); resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); else free(ad, M_ACPIDEV); return (child); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += bus_print_child_domain(bus, child); retval += bus_print_child_footer(bus, child); return (retval); } /* * If this device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. 
* * XXX Disabled for now since many necessary devices (like fdc and * ATA) don't claim the devices we created for them but still expect * them to be powered up. */ static void acpi_probe_nomatch(device_t bus, device_t child) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D3); #endif } /* * If a new driver has a chance to probe a child, first power it up. * * XXX Disabled for now (see acpi_probe_nomatch for details). */ static void acpi_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs)) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); #else device_probe_and_attach(child); #endif } } free(devlist, M_TEMP); } /* Location hint for devctl(8) */ static int acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); int pxm; if (dinfo->ad_handle) { sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { sbuf_printf(sb, " _PXM=%d", pxm); } } return (0); } /* PnP information for devctl(8) */ int acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb) { ACPI_DEVICE_INFO *adinfo; if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { sbuf_printf(sb, "unknown"); return (0); } sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s", (adinfo->Valid & ACPI_VALID_HID) ? adinfo->HardwareId.String : "none", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL, ((adinfo->Valid & ACPI_VALID_CID) && adinfo->CompatibleIdList.Count > 0) ? adinfo->CompatibleIdList.Ids[0].String : "none"); AcpiOsFree(adinfo); return (0); } static int acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); return (acpi_pnpinfo(dinfo->ad_handle, sb)); } /* * Note: the check for ACPI locator may be redundant. However, this routine is * suitable for both busses whose only locator is ACPI and as a building block * for busses that have multiple locators to cope with. */ int acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) { ACPI_HANDLE *handle = acpi_get_handle(child); if (handle != NULL) sbuf_printf(sb, "%s", acpi_name(handle)); return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } static int acpi_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) return (acpi_get_acpi_device_path(bus, child, locator, sb)); if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { ACPI_DEVICE_INFO *adinfo; if (!ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo)) && dinfo->ad_handle != 0 && (adinfo->Valid & ACPI_VALID_HID)) { const char *hid = adinfo->HardwareId.String; u_long uid = (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL; u_long hidval; /* * In UEFI Standard Version 2.6, Section 9.6.1.6 Text * Device Node Reference, there's an insanely long table * 98. This implements the relevant bits from that * table. Newer versions appear not to have required * anything new.
The EDK2 firmware presents both PciRoot * and PcieRoot as PciRoot. Follow the EDK2 standard. */ if (strncmp("PNP", hid, 3) != 0) goto nomatch; hidval = strtoul(hid + 3, NULL, 16); switch (hidval) { case 0x0301: sbuf_printf(sb, "Keyboard(0x%lx)", uid); break; case 0x0401: sbuf_printf(sb, "ParallelPort(0x%lx)", uid); break; case 0x0501: sbuf_printf(sb, "Serial(0x%lx)", uid); break; case 0x0604: sbuf_printf(sb, "Floppy(0x%lx)", uid); break; case 0x0a03: case 0x0a08: sbuf_printf(sb, "PciRoot(0x%lx)", uid); break; default: /* Everything else gets a generic encode */ nomatch: sbuf_printf(sb, "Acpi(%s,0x%lx)", hid, uid); break; } } /* Not handled: AcpiAdr... unsure how to know it's one */ } /* For the rest, punt to the default handler */ return (bus_generic_get_device_path(bus, child, locator, sb)); } /* * Handle device deletion. */ static void acpi_child_deleted(device_t dev, device_t child) { struct acpi_device *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ad_handle) == child) AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ACPI_IVAR_FLAGS: *(int *)result = ad->ad_flags; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; case PCI_IVAR_CLASS: *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; break; case PCI_IVAR_SUBCLASS: *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; break; case PCI_IVAR_PROGIF: *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; case ACPI_IVAR_FLAGS: ad->ad_flags = (int)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static struct resource_list * acpi_get_rlist(device_t dev, device_t child) { struct acpi_device *ad; ad = device_get_ivars(child); return (&ad->ad_rl); } static int acpi_match_resource_hint(device_t dev, int type, long value) { struct acpi_device *ad = device_get_ivars(dev); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->start <= value && rle->end >= value) return (1); } return (0); } /* * Does this device match because the resources match? */ static bool acpi_hint_device_matches_resources(device_t child, const char *name, int unit) { long value; bool matches; /* * Check for matching resources. We must have at least one match. * Since I/O and memory resources cannot be shared, if we get a * match on either of those, ignore any mismatches in IRQs or DRQs. * * XXX: We may want to revisit this to be more lenient and wire * as long as it gets one match. 
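 *
 * For example, with hypothetical hints
 *
 *	hint.fdc.0.at="acpi0"
 *	hint.fdc.0.port="0x3f0"
 *
 * the child matches if one of its port resource ranges covers 0x3f2
 * (note the fdc special case below).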
*/ matches = false; if (resource_long_value(name, unit, "port", &value) == 0) { /* * Floppy drive controllers are notorious for having a * wide variety of resources not all of which include the * first port that is specified by the hint (typically * 0x3f0) (see the comment above fdc_isa_alloc_resources() * in fdc_isa.c). However, they do all seem to include * port + 2 (e.g. 0x3f2) so for a floppy device, look for * 'value + 2' in the port resources instead of the hint * value. */ if (strcmp(name, "fdc") == 0) value += 2; if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) matches = true; else return false; } if (resource_long_value(name, unit, "maddr", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) matches = true; else return false; } /* * If the I/O address and/or the memory address matched, then * assume this device matches and that any mismatch in other resources * will be resolved by silently ignoring those other resources. Otherwise * all further resources must match. */ if (matches) { return (true); } if (resource_long_value(name, unit, "irq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) matches = true; else return false; } if (resource_long_value(name, unit, "drq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) matches = true; else return false; } return matches; } /* * Wire device unit numbers based on resource matches in hints. */ static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp) { device_location_cache_t *cache; const char *s; int line, unit; bool matches; /* * Iterate over all the hints for the devices with the specified * name to see if one's resources are a subset of this device. */ line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { /* Must have an "at" for acpi or isa. */ resource_string_value(name, unit, "at", &s); matches = false; if (strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0) matches = acpi_hint_device_matches_resources(child, name, unit); else matches = dev_wired_cache_match(cache, child, s); if (matches) { /* We have a winner! */ *unitp = unit; break; } } dev_wired_cache_fini(cache); } /* * Fetch the NUMA domain for a device by mapping the value returned by * _PXM to a NUMA domain. If the device does not have a _PXM method, * -2 is returned. If any other error occurs, -1 is returned.
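 * (acpi_get_cpus() and acpi_get_domain() below rely on this convention
 * when deciding whether to fall back to the generic bus methods.)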
*/ static int acpi_parse_pxm(device_t dev) { #ifdef NUMA #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) ACPI_HANDLE handle; ACPI_STATUS status; int pxm; handle = acpi_get_handle(dev); if (handle == NULL) return (-2); status = acpi_GetInteger(handle, "_PXM", &pxm); if (ACPI_SUCCESS(status)) return (acpi_map_pxm_to_vm_domainid(pxm)); if (status == AE_NOT_FOUND) return (-2); #endif #endif return (-1); } int acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { int d, error; d = acpi_parse_pxm(child); if (d < 0) return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); switch (op) { case LOCAL_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = cpuset_domain[d]; return (0); case INTR_CPUS: error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); if (error != 0) return (error); if (setsize != sizeof(cpuset_t)) return (EINVAL); CPU_AND(cpuset, cpuset, &cpuset_domain[d]); return (0); default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } } /* * Fetch the NUMA domain for the given device 'dev'. * * If a device has a _PXM method, map that to a NUMA domain. * Otherwise, pass the request up to the parent. * If there's no matching domain or the domain cannot be * determined, return ENOENT. */ int acpi_get_domain(device_t dev, device_t child, int *domain) { int d; d = acpi_parse_pxm(child); if (d >= 0) { *domain = d; return (0); } if (d == -1) return (ENOENT); /* No _PXM node; go up a level */ return (bus_generic_get_domain(dev, child, domain)); } static struct rman * acpi_get_rman(device_t bus, int type, u_int flags) { /* Only memory and IO resources are managed. */ switch (type) { case SYS_RES_IOPORT: return (&acpi_rman_io); case SYS_RES_MEMORY: return (&acpi_rman_mem); default: return (NULL); } } /* * Pre-allocate/manage all memory and IO resources. Since rman can't handle * duplicates, we merge any in the sysresource attach routine. */ static int acpi_sysres_alloc(device_t dev) { struct acpi_softc *sc = device_get_softc(dev); struct resource *res; struct resource_list_entry *rle; struct rman *rm; device_t *children; int child_count, i; /* * Probe/attach any sysresource devices. This would be unnecessary if we * had multi-pass probe/attach. */ if (device_get_children(dev, &children, &child_count) != 0) return (ENXIO); for (i = 0; i < child_count; i++) { if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) device_probe_and_attach(children[i]); } free(children, M_TEMP); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->res != NULL) { device_printf(dev, "duplicate resource for %jx\n", rle->start); continue; } /* Only memory and IO resources are valid here. */ rm = acpi_get_rman(dev, rle->type, 0); if (rm == NULL) continue; /* Pre-allocate resource and add to our rman pool. */ res = bus_alloc_resource(dev, rle->type, &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, RF_ACTIVE | RF_UNMAPPED); if (res != NULL) { rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); rle->res = res; } else if (bootverbose) device_printf(dev, "reservation of %jx, %jx (%d) failed\n", rle->start, rle->count, rle->type); } return (0); } /* * Reserve declared resources for active devices found during the * namespace scan once the boot-time attach of devices has completed. * * Ideally reserving firmware-assigned resources would work in a * depth-first traversal of the device namespace, but this is * complicated. In particular, not all resources are enumerated by * ACPI (e.g. 
PCI bridges and devices enumerate their resources via * other means). Some systems also enumerate devices via ACPI behind * PCI bridges but without a matching PCI device_t enumerated via * PCI bus scanning; the device_t's end up as direct children of * acpi0. Doing this scan late is not ideal, but works for now. */ static void acpi_reserve_resources(device_t dev) { struct resource_list_entry *rle; struct resource_list *rl; struct acpi_device *ad; device_t *children; int child_count, i; if (device_get_children(dev, &children, &child_count) != 0) return; for (i = 0; i < child_count; i++) { ad = device_get_ivars(children[i]); rl = &ad->ad_rl; /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) continue; STAILQ_FOREACH(rle, rl, link) { /* * Don't reserve IRQ resources. There are many sticky things * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET * when using legacy routing). */ if (rle->type == SYS_RES_IRQ) continue; /* * Don't reserve the resource if it is already allocated. * The acpi_ec(4) driver can allocate its resources early * if ECDT is present. */ if (rle->res != NULL) continue; /* * Try to reserve the resource from our parent. If this * fails because the resource is a system resource, just * let it be. The resource range is already reserved so * that other devices will not use it. If the driver * needs to allocate the resource, then * acpi_alloc_resource() will sub-alloc from the system * resource. */ resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid, rle->start, rle->end, rle->count, 0); } } free(children, M_TEMP); } static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; rman_res_t end; #ifdef INTRNG /* map with default for now */ if (type == SYS_RES_IRQ) start = (rman_res_t)acpi_map_intr(child, (u_int)start, acpi_get_handle(child)); #endif /* If the resource is already allocated, fail. */ if (resource_list_busy(rl, type, rid)) return (EBUSY); /* If the resource is already reserved, release it. */ if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, dev, child, type, rid); /* Add the resource. */ end = (start + count - 1); resource_list_add(rl, type, rid, start, end, count); return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifndef INTRNG ACPI_RESOURCE ares; #endif struct acpi_device *ad; struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, * use resource_list_alloc() to handle reserved resources. For * other devices, pass the request up to our parent. */ if (bus == device_get_parent(child)) { ad = device_get_ivars(child); rl = &ad->ad_rl; /* * Simulate the behavior of the ISA bus for direct children * devices. That is, if a non-default range is specified for * a resource that doesn't exist, use bus_set_resource() to * add the resource before allocating it. Note that these * resources will not be reserved.
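 * A hypothetical driver passing an explicit range, e.g.
 *	bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0x400, 0x40f,
 *	    0x10, RF_ACTIVE);
 * therefore gets a resource list entry created on the fly before the
 * allocation below is attempted.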
*/ if (!isdefault && resource_list_find(rl, type, *rid) == NULL) resource_list_add(rl, type, *rid, start, end, count); res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); #ifndef INTRNG if (res != NULL && type == SYS_RES_IRQ) { /* * Since bus_config_intr() takes immediate effect, we cannot * configure the interrupt associated with a device when we * parse the resources but have to defer it until a driver * actually allocates the interrupt via bus_alloc_resource(). * * XXX: Should we handle the lookup failing? */ if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) acpi_config_intr(child, &ares); } #endif /* * If this is an allocation of the "default" range for a given * RID, fetch the exact bounds for this resource from the * resource list entry to try to allocate the range from the * system resource regions. */ if (res == NULL && isdefault) { rle = resource_list_find(rl, type, *rid); if (rle != NULL) { start = rle->start; end = rle->end; count = rle->count; } } } else res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); /* * If the first attempt failed and this is an allocation of a * specific range, try to satisfy the request via a suballocation * from our system resource regions. */ if (res == NULL && start + count - 1 == end) res = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags); return (res); } static bool acpi_is_resource_managed(device_t bus, struct resource *r) { struct rman *rm; rm = acpi_get_rman(bus, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (false); return (rman_is_region_manager(r, rm)); } static struct resource * acpi_managed_resource(device_t bus, int type, struct resource *r) { struct acpi_softc *sc = device_get_softc(bus); struct resource_list_entry *rle; KASSERT(acpi_is_resource_managed(bus, r), ("resource %p is not suballocated", r)); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->type != type || rle->res == NULL) continue; if (rman_get_start(r) >= rman_get_start(rle->res) && rman_get_end(r) <= rman_get_end(rle->res)) return (rle->res); } return (NULL); } static int -acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r, +acpi_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (acpi_is_resource_managed(bus, r)) return (rman_adjust_resource(r, start, end)); - return (bus_generic_adjust_resource(bus, child, type, r, start, end)); + return (bus_generic_adjust_resource(bus, child, r, start, end)); } static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* * If this resource belongs to one of our internal managers, * deactivate it and release it to the local pool. 
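 * Resources that were allocated from our parent are instead handed back
 * via bus_generic_rl_release_resource().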
*/ if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_release_resource(bus, child, type, rid, r)); return (bus_generic_rl_release_resource(bus, child, type, rid, r)); } static void acpi_delete_resource(device_t bus, device_t child, int type, int rid) { struct resource_list *rl; rl = acpi_get_rlist(bus, child); if (resource_list_busy(rl, type, rid)) { device_printf(bus, "delete_resource: Resource still owned by child" " (type=%d, rid=%d)\n", type, rid); return; } if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, bus, child, type, rid); resource_list_delete(rl, type, rid); } static int acpi_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_activate_resource(bus, child, type, rid, r)); return (bus_generic_activate_resource(bus, child, type, rid, r)); } static int acpi_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_deactivate_resource(bus, child, type, rid, r)); return (bus_generic_deactivate_resource(bus, child, type, rid, r)); } static int acpi_map_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct resource *sysres; rman_res_t length, start; int error; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_map_resource(bus, child, type, r, argsp, map)); /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sysres = acpi_managed_resource(bus, type, r); if (sysres == NULL) return (ENOENT); args.offset = start - rman_get_start(sysres); args.length = length; return (bus_generic_map_resource(bus, child, type, sysres, &args, map)); } static int acpi_unmap_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map *map) { if (acpi_is_resource_managed(bus, r)) { r = acpi_managed_resource(bus, type, r); if (r == NULL) return (ENOENT); } return (bus_generic_unmap_resource(bus, child, type, r, map)); } /* Allocate an IO port or memory resource, given its GAS. */ int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags) { int error, res_type; error = ENOMEM; if (type == NULL || rid == NULL || gas == NULL || res == NULL) return (EINVAL); /* We only support memory and IO spaces. */ switch (gas->SpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: res_type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: res_type = SYS_RES_IOPORT; break; default: return (EOPNOTSUPP); } /* * If the register width is less than 8, assume the BIOS author means * it is a bit field and just allocate a byte. */ if (gas->BitWidth && gas->BitWidth < 8) gas->BitWidth = 8; /* Validate the address after we're sure we support the space. */ if (gas->Address == 0 || gas->BitWidth == 0) return (EINVAL); bus_set_resource(dev, res_type, *rid, gas->Address, gas->BitWidth / 8); *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); if (*res != NULL) { *type = res_type; error = 0; } else bus_delete_resource(dev, res_type, *rid); return (error); } /* Probe _HID and _CID for compatible ISA PNP ids. 
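 * The 32-bit IDs returned are compressed EISA IDs as produced by
 * PNP_EISAID().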
*/ static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; uint32_t pnpid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch and validate the HID. */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? PNP_EISAID(devinfo->HardwareId.String) : 0; AcpiOsFree(devinfo); return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_PNP_DEVICE_ID *ids; ACPI_HANDLE h; uint32_t *pnpid; int i, valid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); if ((devinfo->Valid & ACPI_VALID_CID) == 0) { AcpiOsFree(devinfo); return_VALUE (0); } if (devinfo->CompatibleIdList.Count < count) count = devinfo->CompatibleIdList.Count; ids = devinfo->CompatibleIdList.Ids; for (i = 0, valid = 0; i < count; i++) if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && strncmp(ids[i].String, "PNP", 3) == 0) { *pnpid++ = PNP_EISAID(ids[i].String); valid++; } AcpiOsFree(devinfo); return_VALUE (valid); } static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; int rv; int i; h = acpi_get_handle(dev); if (ids == NULL || h == NULL) return (ENXIO); t = acpi_get_type(dev); if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) return (ENXIO); /* Try to match one of the array of IDs with a HID or CID. */ for (i = 0; ids[i] != NULL; i++) { rv = acpi_MatchHid(h, ids[i]); if (rv == ACPI_MATCHHID_NOMATCH) continue; if (match != NULL) { *match = ids[i]; } return ((rv == ACPI_MATCHHID_HID)? 
BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); } return (ENXIO); } static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) { ACPI_HANDLE h; if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); return (AcpiEvaluateObject(h, pathname, parameters, ret)); } static ACPI_STATUS acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { const ACPI_OBJECT *pkg, *name, *val; struct acpi_device *ad; ACPI_STATUS status; int i; ad = device_get_ivars(dev); if (ad == NULL || propname == NULL) return (AE_BAD_PARAMETER); if (ad->dsd_pkg == NULL) { if (ad->dsd.Pointer == NULL) { status = acpi_find_dsd(ad); if (ACPI_FAILURE(status)) return (status); } else { return (AE_NOT_FOUND); } } for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) { pkg = &ad->dsd_pkg->Package.Elements[i]; if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2) continue; name = &pkg->Package.Elements[0]; val = &pkg->Package.Elements[1]; if (name->Type != ACPI_TYPE_STRING) continue; if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) { if (value != NULL) *value = val; return (AE_OK); } } return (AE_NOT_FOUND); } static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad) { const ACPI_OBJECT *dsd, *guid, *pkg; ACPI_STATUS status; ad->dsd.Length = ACPI_ALLOCATE_BUFFER; ad->dsd.Pointer = NULL; ad->dsd_pkg = NULL; status = AcpiEvaluateObject(ad->ad_handle, "_DSD", NULL, &ad->dsd); if (ACPI_FAILURE(status)) return (status); dsd = ad->dsd.Pointer; guid = &dsd->Package.Elements[0]; pkg = &dsd->Package.Elements[1]; if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE || guid->Buffer.Length != sizeof(acpi_dsd_uuid)) return (AE_NOT_FOUND); if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid, sizeof(acpi_dsd_uuid)) == 0) { ad->dsd_pkg = pkg; return (AE_OK); } return (AE_NOT_FOUND); } static ssize_t acpi_bus_get_prop_handle(const ACPI_OBJECT *hobj, void *propvalue, size_t size) { ACPI_OBJECT *pobj; ACPI_HANDLE h; if (hobj->Type != ACPI_TYPE_PACKAGE) goto err; if (hobj->Package.Count != 1) goto err; pobj = &hobj->Package.Elements[0]; if (pobj == NULL) goto err; if (pobj->Type != ACPI_TYPE_LOCAL_REFERENCE) goto err; h = acpi_GetReference(NULL, pobj); if (h == NULL) goto err; if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) *(ACPI_HANDLE *)propvalue = h; return (sizeof(ACPI_HANDLE)); err: return (-1); } static ssize_t acpi_bus_get_prop(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { ACPI_STATUS status; const ACPI_OBJECT *obj; status = acpi_device_get_prop(bus, child, __DECONST(char *, propname), &obj); if (ACPI_FAILURE(status)) return (-1); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: break; case DEVICE_PROP_HANDLE: return (acpi_bus_get_prop_handle(obj, propvalue, size)); default: return (-1); } switch (obj->Type) { case ACPI_TYPE_INTEGER: if (type == DEVICE_PROP_UINT32) { if (propvalue != NULL && size >= sizeof(uint32_t)) *((uint32_t *)propvalue) = obj->Integer.Value; return (sizeof(uint32_t)); } if (propvalue != NULL && size >= sizeof(uint64_t)) *((uint64_t *) propvalue) = obj->Integer.Value; return (sizeof(uint64_t)); case ACPI_TYPE_STRING: if (type != DEVICE_PROP_ANY && type != DEVICE_PROP_BUFFER) return (-1); if (propvalue != NULL && size > 0) memcpy(propvalue, obj->String.Pointer, MIN(size, 
obj->String.Length)); return (obj->String.Length); case ACPI_TYPE_BUFFER: if (propvalue != NULL && size > 0) memcpy(propvalue, obj->Buffer.Pointer, MIN(size, obj->Buffer.Length)); return (obj->Buffer.Length); case ACPI_TYPE_PACKAGE: if (propvalue != NULL && size >= sizeof(ACPI_OBJECT *)) { *((ACPI_OBJECT **) propvalue) = __DECONST(ACPI_OBJECT *, obj); } return (sizeof(ACPI_OBJECT *)); case ACPI_TYPE_LOCAL_REFERENCE: if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) { ACPI_HANDLE h; h = acpi_GetReference(NULL, __DECONST(ACPI_OBJECT *, obj)); memcpy(propvalue, h, sizeof(ACPI_HANDLE)); } return (sizeof(ACPI_HANDLE)); default: return (0); } } int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) { struct acpi_softc *sc; ACPI_HANDLE handle; ACPI_STATUS status; char sxd[8]; handle = acpi_get_handle(dev); /* * XXX If we find these devices, don't try to power them down. * The serial and IRDA ports on my T23 hang the system when * set to D3 and it appears that such legacy devices may * need special handling in their drivers. */ if (dstate == NULL || handle == NULL || acpi_MatchHid(handle, "PNP0500") || acpi_MatchHid(handle, "PNP0501") || acpi_MatchHid(handle, "PNP0502") || acpi_MatchHid(handle, "PNP0510") || acpi_MatchHid(handle, "PNP0511")) return (ENXIO); /* * Override next state with the value from _SxD, if present. * Note illegal _S0D is evaluated because some systems expect this. */ sc = device_get_softc(bus); snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); status = acpi_GetInteger(handle, sxd, dstate); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { device_printf(dev, "failed to get %s on %s: %s\n", sxd, acpi_name(handle), AcpiFormatException(status)); return (ENXIO); } return (0); } /* Callback arg for our implementation of walking the namespace. */ struct acpi_device_scan_ctx { acpi_scan_cb_t user_fn; void *arg; ACPI_HANDLE parent; }; static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) { struct acpi_device_scan_ctx *ctx; device_t dev, old_dev; ACPI_STATUS status; ACPI_OBJECT_TYPE type; /* * Skip this device if we think we'll have trouble with it or it is * the parent where the scan began. */ ctx = (struct acpi_device_scan_ctx *)arg; if (acpi_avoid(h) || h == ctx->parent) return (AE_OK); /* If this is not a valid device type (e.g., a method), skip it. */ if (ACPI_FAILURE(AcpiGetType(h, &type))) return (AE_OK); if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) return (AE_OK); /* * Call the user function with the current device. If it is unchanged * afterwards, return. Otherwise, we update the handle to the new dev. */ old_dev = acpi_get_device(h); dev = old_dev; status = ctx->user_fn(h, &dev, level, ctx->arg); if (ACPI_FAILURE(status) || old_dev == dev) return (status); /* Remove the old child and its connection to the handle. */ if (old_dev != NULL) device_delete_child(device_get_parent(old_dev), old_dev); /* Recreate the handle association if the user created a device. 
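 *
 * The association is symmetric; a minimal sketch using the helpers that
 * appear elsewhere in this file:
 *
 *	acpi_set_handle(dev, h);			-- device_t to handle
 *	AcpiAttachData(h, acpi_fake_objhandler, dev);	-- handle to device_t
 *	dev = acpi_get_device(h);			-- recover it later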
*/ if (dev != NULL) AcpiAttachData(h, acpi_fake_objhandler, dev); return (AE_OK); } static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg) { ACPI_HANDLE h; struct acpi_device_scan_ctx ctx; if (acpi_disabled("children")) return (AE_OK); if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); ctx.user_fn = user_fn; ctx.arg = arg; ctx.parent = h; return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, acpi_device_scan_cb, NULL, &ctx, NULL)); } /* * Even though ACPI devices are not PCI, we use the PCI approach for setting * device power states since it's close enough to ACPI. */ int acpi_set_powerstate(device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; h = acpi_get_handle(child); if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) return (EINVAL); if (h == NULL) return (0); /* Ignore errors if the power methods aren't present. */ status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(child, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(child, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); return (0); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. */ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: if (result == 0 && ids->ip_desc) device_set_desc(child, ids->ip_desc); return_VALUE (result); } /* * Look for a MCFG table. If it is present, use the settings for * domain (segment) 0 to setup PCI config space access via the memory * map. * * On non-x86 architectures (arm64 for now), this will be done from the * PCI host bridge driver. 
*/ static void acpi_enable_pcie(void) { #if defined(__i386__) || defined(__amd64__) ACPI_TABLE_HEADER *hdr; ACPI_MCFG_ALLOCATION *alloc, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); if (ACPI_FAILURE(status)) return; end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); while (alloc < end) { pcie_cfgregopen(alloc->Address, alloc->PciSegment, alloc->StartBusNumber, alloc->EndBusNumber); alloc++; } #endif } static void acpi_platform_osc(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; uint32_t cap_set[2]; /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 }; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) return; cap_set[1] = 0x10; /* APEI Support */ status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) return; device_printf(dev, "_OSC failed: %s\n", AcpiFormatException(status)); return; } } /* * Scan all of the ACPI namespace and attach child devices. * * We should only expect to find devices in the \_PR, \_TZ, \_SI, and * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. * However, in violation of the spec, some systems place their PCI link * devices in \, so we have to walk the whole namespace. We check the * type of namespace nodes, so this should be ok. */ static void acpi_probe_children(device_t bus) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Scan the namespace and insert placeholders for all the devices that * we find. We also probe/attach any early devices. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. (This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, NULL, bus, NULL); /* Pre-allocate resources for our rman from any sysresource devices. */ acpi_sysres_alloc(bus); /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_generic_probe(bus); /* Probe/attach all children, created statically and from the namespace. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n")); bus_generic_attach(bus); /* * Reserve resources allocated to children but not yet allocated * by a driver. */ acpi_reserve_resources(bus); /* Attach wake sysctls. */ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Determine the probe order for a given device. */ static void acpi_probe_order(ACPI_HANDLE handle, int *order) { ACPI_OBJECT_TYPE type; /* * 0. CPUs * 1. I/O port and memory system resource holders * 2. Clocks and timers (to handle early accesses) * 3. Embedded controllers (to handle early accesses) * 4. 
PCI Link Devices */ AcpiGetType(handle, &type); if (type == ACPI_TYPE_PROCESSOR) *order = 0; else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) *order = 1; else if (acpi_MatchHid(handle, "PNP0100") || acpi_MatchHid(handle, "PNP0103") || acpi_MatchHid(handle, "PNP0B00")) *order = 2; else if (acpi_MatchHid(handle, "PNP0C09")) *order = 3; else if (acpi_MatchHid(handle, "PNP0C0F")) *order = 4; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO *devinfo; struct acpi_device *ad; struct acpi_prw_data prw; ACPI_OBJECT_TYPE type; ACPI_HANDLE h; device_t bus, child; char *handle_str; int order; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("children")) return_ACPI_STATUS (AE_OK); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); bus = (device_t)context; if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { handle_str = acpi_name(handle); switch (type) { case ACPI_TYPE_DEVICE: /* * Since we scan from \, be sure to skip system scope objects. * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run * during the initialization and \_TZ_ is to support Notify() on it. */ if (strcmp(handle_str, "\\_SB_") == 0 || strcmp(handle_str, "\\_TZ_") == 0) break; if (acpi_parse_prw(handle, &prw) == 0) AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); /* * Ignore devices that do not have a _HID or _CID. They should * be discovered by other buses (e.g. the PCI bus driver). */ if (!acpi_has_hid(handle)) break; /* FALLTHROUGH */ case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: /* * Create a placeholder device for this node. Sort the * placeholder so that the probe/attach passes will run * breadth-first. Orders less than ACPI_DEV_BASE_ORDER * are reserved for special objects (i.e., system * resources). */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); order = level * 10 + ACPI_DEV_BASE_ORDER; acpi_probe_order(handle, &order); child = BUS_ADD_CHILD(bus, order, NULL, -1); if (child == NULL) break; /* Associate the handle with the device_t and vice versa. */ acpi_set_handle(child, handle); AcpiAttachData(handle, acpi_fake_objhandler, child); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). * * XXX PCI link devices sometimes report "present" but not * "functional" (i.e. if disabled). Go ahead and probe them * anyway since we may enable them later. */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { /* Never disable PCI link devices. */ if (acpi_MatchHid(handle, "PNP0C0F")) break; /* * The RTC device should be enabled for CMOS register space * unless the FADT indicates it is not present * (this is checked in the RTC probe routine). */ if (acpi_MatchHid(handle, "PNP0B00")) break; /* * Docking stations should remain enabled since the system * may be undocked at boot. */ if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) break; device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources.
*/ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); ad = device_get_ivars(child); ad->ad_cls_class = 0xffffff; if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { ad->ad_cls_class = strtoul(devinfo->ClassCode.String, NULL, 16); } AcpiOsFree(devinfo); } break; } } return_ACPI_STATUS (AE_OK); } /* * AcpiAttachData() requires an object handler but never uses it. This is a * placeholder object handler so we can store a device_t in an ACPI_HANDLE. */ void acpi_fake_objhandler(ACPI_HANDLE h, void *data) { } static void acpi_shutdown_final(void *arg, int howto) { struct acpi_softc *sc = (struct acpi_softc *)arg; register_t intr; ACPI_STATUS status; /* * XXX Shutdown code should only run on the BSP (cpuid 0). * Some chipsets do not power off the system correctly if called from * an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } device_printf(sc->acpi_dev, "Powering system off\n"); intr = intr_disable(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - timeout\n"); } } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { /* Reboot using the reset register. */ status = AcpiReset(); if (ACPI_SUCCESS(status)) { DELAY(1000000); device_printf(sc->acpi_dev, "reset failed - timeout\n"); } else if (status != AE_NOT_EXIST) device_printf(sc->acpi_dev, "reset failed - %s\n", AcpiFormatException(status)); } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { /* * Only disable ACPI if the user requested it. On some systems, writing * the disable value to SMI_CMD hangs the system. */ device_printf(sc->acpi_dev, "Shutting down\n"); AcpiTerminate(); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; /* Enable and clear fixed events and install handlers. */ if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. */ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); #ifdef ACPI_EARLY_EPYC_WAR /* * Certain Threadripper boards always return 0 for FreeBSD because the * firmware only returns non-zero for the OS string "Windows 2015"; * otherwise it returns zero. Force them to always be treated as present. * Beta versions were worse: they always returned 0.
*/ if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010")) return (TRUE); #endif status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if the battery is actually present and inserted. */ BOOLEAN acpi_BatteryIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if a device has at least one valid device ID. */ BOOLEAN acpi_has_hid(ACPI_HANDLE h) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (FALSE); ret = FALSE; if ((devinfo->Valid & ACPI_VALID_HID) != 0) ret = TRUE; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) if (devinfo->CompatibleIdList.Count > 0) ret = TRUE; AcpiOsFree(devinfo); return (ret); } /* * Match a HID string against a handle. * Returns ACPI_MATCHHID_HID if the _HID matches, * ACPI_MATCHHID_CID if a _CID matches and the _HID does not, and * ACPI_MATCHHID_NOMATCH (0) if neither matches. */ int acpi_MatchHid(ACPI_HANDLE h, const char *hid) { ACPI_DEVICE_INFO *devinfo; int ret; int i; if (hid == NULL || h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ACPI_MATCHHID_NOMATCH); ret = ACPI_MATCHHID_NOMATCH; if ((devinfo->Valid & ACPI_VALID_HID) != 0 && strcmp(hid, devinfo->HardwareId.String) == 0) ret = ACPI_MATCHHID_HID; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) { ret = ACPI_MATCHHID_CID; break; } } AcpiOsFree(devinfo); return (ret); } /* * Return the handle of a named object within our scope, i.e., that of (parent) * or one of its parents. */ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; /* Walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (ACPI_SUCCESS(status)) { *result = r; return (AE_OK); } /* XXX Return error here? */ if (status != AE_NOT_FOUND) return (AE_OK); if (ACPI_FAILURE(AcpiGetParent(parent, &r))) return (AE_NOT_FOUND); parent = r; } } ACPI_STATUS acpi_GetProperty(device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { device_t bus = device_get_parent(dev); return (ACPI_GET_PROPERTY(bus, dev, propname, value)); } /* * Allocate a buffer with a preset data size. */ ACPI_BUFFER * acpi_AllocBuffer(int size) { ACPI_BUFFER *buf; if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) return (NULL); buf->Length = size; buf->Pointer = (void *)(buf + 1); return (buf); } ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) { ACPI_OBJECT arg1; ACPI_OBJECT_LIST args; arg1.Type = ACPI_TYPE_INTEGER; arg1.Integer.Value = number; args.Count = 1; args.Pointer = &arg1; return (AcpiEvaluateObject(handle, path, &args, NULL)); } /* * Evaluate a path that should return an integer.
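 *
 * A minimal usage sketch (the handle "h" and the "_STA" object here are
 * placeholders for any integer-valued object):
 *
 *	UINT32 sta;
 *
 *	if (ACPI_SUCCESS(acpi_GetInteger(h, "_STA", &sta)))
 *		present = ACPI_DEVICE_PRESENT(sta);	-- "present" is illustrative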
*/ ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT param; if (handle == NULL) handle = ACPI_ROOT_OBJECT; /* * Assume that what we've been pointed at is an Integer object, or * a method that will return an Integer. */ buf.Pointer = &param; buf.Length = sizeof(param); status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) { if (param.Type == ACPI_TYPE_INTEGER) *number = param.Integer.Value; else status = AE_TYPE; } /* * In some applications, a method that's expected to return an Integer * may instead return a Buffer (probably to simplify some internal * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, * convert it into an Integer as best we can. * * This is a hack. */ if (status == AE_BUFFER_OVERFLOW) { if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { status = AE_NO_MEMORY; } else { status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) status = acpi_ConvertBufferToInteger(&buf, number); AcpiOsFree(buf.Pointer); } } return (status); } ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) { ACPI_OBJECT *p; UINT8 *val; int i; p = (ACPI_OBJECT *)bufp->Pointer; if (p->Type == ACPI_TYPE_INTEGER) { *number = p->Integer.Value; return (AE_OK); } if (p->Type != ACPI_TYPE_BUFFER) return (AE_TYPE); if (p->Buffer.Length > sizeof(int)) return (AE_BAD_DATA); *number = 0; val = p->Buffer.Pointer; for (i = 0; i < p->Buffer.Length; i++) *number += val[i] << (i * 8); return (AE_OK); } /* * Iterate over the elements of a package object, calling the supplied * function for each element. * * XXX possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) return (AE_BAD_PARAMETER); /* Iterate over components */ i = 0; comp = pkg->Package.Elements; for (; i < pkg->Package.Count; i++, comp++) func(comp, arg); return (AE_OK); } /* * Find the (index)th resource object in a set. */ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* Range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); /* Check for terminator */ if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) return (AE_NOT_FOUND); rp = ACPI_NEXT_RESOURCE(rp); } if (resp != NULL) *resp = rp; return (AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator.
* This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. */ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RS_SIZE_NO_DATA + ACPI_RS_SIZE_MIN) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); /* And add the terminator. */ rp = ACPI_NEXT_RESOURCE(rp); rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; return (AE_OK); } UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) { /* * ACPI spec 9.1.1 defines this. * * "Arg2: Function Index Represents a specific function whose meaning is * specific to the UUID and Revision ID. Function indices should start * with 1. Function number zero is a query function (see the special * return code defined below)." */ ACPI_BUFFER buf; ACPI_OBJECT *obj; UINT64 ret = 0; int i; if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { ACPI_INFO(("Failed to enumerate DSM functions\n")); return (0); } obj = (ACPI_OBJECT *)buf.Pointer; KASSERT(obj, ("Object not allowed to be NULL\n")); /* * From ACPI 6.2 spec 9.1.1: * If Function Index = 0, a Buffer containing a function index bitfield. * Otherwise, the return value and type depends on the UUID and revision * ID (see below). */ switch (obj->Type) { case ACPI_TYPE_BUFFER: for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); break; case ACPI_TYPE_INTEGER: ACPI_BIOS_WARNING((AE_INFO, "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); ret = obj->Integer.Value; break; default: ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); }; AcpiOsFree(obj); return ret; } /* * DSM may return multiple types depending on the function. It is therefore * unsafe to use the typed evaluation. It is highly recommended that the caller * check the type of the returned object. 
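 *
 * A caller sketch (the uuid, revision 1 and function index 1 are
 * illustrative values, not a real protocol):
 *
 *	ACPI_BUFFER buf;
 *	ACPI_OBJECT *obj;
 *
 *	if (ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, 1, 1, NULL, &buf))) {
 *		obj = (ACPI_OBJECT *)buf.Pointer;
 *		if (obj->Type == ACPI_TYPE_BUFFER)
 *			-- consume obj->Buffer.Pointer here
 *		AcpiOsFree(buf.Pointer);
 *	}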
*/ ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf) { return (acpi_EvaluateDSMTyped(handle, uuid, revision, function, package, out_buf, ACPI_TYPE_ANY)); } ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type) { ACPI_OBJECT arg[4]; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; if (out_buf == NULL) return (AE_NO_MEMORY); arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid); arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = function; if (package) { arg[3] = *package; } else { arg[3].Type = ACPI_TYPE_PACKAGE; arg[3].Package.Count = 0; arg[3].Package.Elements = NULL; } arglist.Pointer = arg; arglist.Count = 4; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type); if (ACPI_FAILURE(status)) return (status); KASSERT(ACPI_SUCCESS(status), ("Unexpected status")); *out_buf = buf; return (status); } ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query) { ACPI_OBJECT arg[4], *ret; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; arglist.Pointer = arg; arglist.Count = 4; arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = uuid; arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = count; arg[3].Type = ACPI_TYPE_BUFFER; arg[3].Buffer.Length = count * sizeof(*caps_in); arg[3].Buffer.Pointer = (uint8_t *)caps_in; caps_in[0] = query ? 1 : 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return (status); if (caps_out != NULL) { ret = buf.Pointer; if (ret->Buffer.Length != count * sizeof(*caps_out)) { AcpiOsFree(buf.Pointer); return (AE_BUFFER_OVERFLOW); } bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length); } AcpiOsFree(buf.Pointer); return (status); } /* * Set interrupt model. */ ACPI_STATUS acpi_SetIntrModel(int model) { return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); } /* * Walk subtables of a table and call a callback routine for each * subtable. The caller should provide the first subtable and a * pointer to the end of the table. This can be used to walk tables * such as MADT and SRAT that use subtable entries. */ void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg) { ACPI_SUBTABLE_HEADER *entry; for (entry = first; (void *)entry < end; ) { /* Avoid an infinite loop if we hit a bogus entry. */ if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER)) return; handler(entry, arg); entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length); } } /* * DEPRECATED. This interface has serious deficiencies and will be * removed. * * Immediately enter the sleep state. In the old model, acpiconf(8) ran * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 
*/ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { static int once; if (!once) { device_printf(sc->acpi_dev, "warning: acpi_SetSleepState() deprecated, need to update your software\n"); once = 1; } return (acpi_EnterSleepState(sc, state)); } #if defined(__amd64__) || defined(__i386__) static void acpi_sleep_force_task(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) device_printf(sc->acpi_dev, "force sleep state S%d failed\n", sc->acpi_next_sstate); } static void acpi_sleep_force(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; device_printf(sc->acpi_dev, "suspend request timed out, forcing sleep now\n"); /* * XXX Suspending from callout causes freezes in DEVICE_SUSPEND(). * Suspend from acpi_task thread instead. */ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_sleep_force_task, sc))) device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n"); } #endif /* * Request that the system enter the given suspend state. All /dev/apm * devices and devd(8) will be notified. Userland then has a chance to * save state and acknowledge the request. The system sleeps once all * acks are in. */ int acpi_ReqSleepState(struct acpi_softc *sc, int state) { #if defined(__amd64__) || defined(__i386__) struct apm_clone_data *clone; ACPI_STATUS status; if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); /* * If a reboot/shutdown/suspend request is already in progress or * suspend is blocked due to an upcoming shutdown, just return. */ if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) { return (0); } /* Wait until sleep is enabled. */ while (sc->acpi_sleep_disabled) { AcpiOsSleep(1000); } ACPI_LOCK(acpi); sc->acpi_next_sstate = state; /* S5 (soft-off) should be entered directly with no waiting. */ if (state == ACPI_STATE_S5) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* Record the pending state and notify all apm devices. */ STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { clone->notify_status = APM_EV_NONE; if ((clone->flags & ACPI_EVF_DEVD) == 0) { selwakeuppri(&clone->sel_read, PZERO); KNOTE_LOCKED(&clone->sel_read.si_note, 0); } } /* If devd(8) is not running, immediately enter the sleep state. */ if (!devctl_process_running()) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* * Set a timeout to fire if userland doesn't ack the suspend request * in time. This way we still eventually go to sleep if we were * overheating or running low on battery, even if userland is hung. * We cancel this timeout once all userland acks are in or the * suspend request is aborted. */ callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); ACPI_UNLOCK(acpi); /* Now notify devd(8) also. */ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); return (0); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } /* * Acknowledge (or reject) a pending sleep state. The caller has * prepared for suspend and is now ready for it to proceed. If the * error argument is non-zero, it indicates suspend should be cancelled * and gives an errno value describing why. Once all votes are in, * we suspend the system. 
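 *
 * Userland listeners normally deliver this vote through the control
 * device; a sketch (an open /dev/acpi descriptor "fd" is assumed):
 *
 *	int err = 0;	-- 0 acknowledges, a nonzero errno value cancels
 *
 *	ioctl(fd, ACPIIO_ACKSLPSTATE, &err);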
*/ int acpi_AckSleepState(struct apm_clone_data *clone, int error) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc; int ret, sleeping; /* If no pending sleep state, return an error. */ ACPI_LOCK(acpi); sc = clone->acpi_sc; if (sc->acpi_next_sstate == 0) { ACPI_UNLOCK(acpi); return (ENXIO); } /* Caller wants to abort suspend process. */ if (error) { sc->acpi_next_sstate = 0; callout_stop(&sc->susp_force_to); device_printf(sc->acpi_dev, "listener on %s cancelled the pending suspend\n", devtoname(clone->cdev)); ACPI_UNLOCK(acpi); return (0); } /* * Mark this device as acking the suspend request. Then, walk through * all devices, seeing if they agree yet. We only count devices that * are writable since read-only devices couldn't ack the request. */ sleeping = TRUE; clone->notify_status = APM_EV_ACKED; STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { if ((clone->flags & ACPI_EVF_WRITE) != 0 && clone->notify_status != APM_EV_ACKED) { sleeping = FALSE; break; } } /* If all devices have voted "yes", we will suspend now. */ if (sleeping) callout_stop(&sc->susp_force_to); ACPI_UNLOCK(acpi); ret = 0; if (sleeping) { if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) ret = ENODEV; } return (ret); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } static void acpi_sleep_enable(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; ACPI_LOCK_ASSERT(acpi); /* Reschedule if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) { callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); return; } sc->acpi_sleep_disabled = FALSE; } static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc) { ACPI_STATUS status; /* Fail if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) return (AE_ERROR); ACPI_LOCK(acpi); status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK; sc->acpi_sleep_disabled = TRUE; ACPI_UNLOCK(acpi); return (status); } enum acpi_sleep_state { ACPI_SS_NONE, ACPI_SS_GPE_SET, ACPI_SS_DEV_SUSPEND, ACPI_SS_SLP_PREP, ACPI_SS_SLEPT, }; /* * Enter the desired system sleep state. * * Currently we support S1-S5 but S4 is only S4BIOS */ static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state) { register_t intr; ACPI_STATUS status; ACPI_EVENT_STATUS power_button_status; enum acpi_sleep_state slp_state; int sleep_result; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return_ACPI_STATUS (AE_BAD_PARAMETER); if (!acpi_sleep_states[state]) { device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", state); return (AE_SUPPORT); } /* Re-entry once we're suspending is not allowed. */ status = acpi_sleep_disable(sc); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "suspend request ignored (not ready yet)\n"); return (status); } if (state == ACPI_STATE_S5) { /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. 
*/ shutdown_nice(RB_POWEROFF); return_ACPI_STATUS (AE_OK); } EVENTHANDLER_INVOKE(power_suspend_early); stop_all_proc(); suspend_all_fs(); EVENTHANDLER_INVOKE(power_suspend); #ifdef EARLY_AP_STARTUP MPASS(mp_ncpus == 1 || smp_started); thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } #endif /* * Be sure to hold the bus topology lock across DEVICE_SUSPEND/RESUME. */ bus_topo_lock(); slp_state = ACPI_SS_NONE; sc->acpi_sstate = state; /* Enable any GPEs as appropriate and requested by the user. */ acpi_wake_prep_walk(state); slp_state = ACPI_SS_GPE_SET; /* * Inform all devices that we are going to sleep. If at least one * device fails, DEVICE_SUSPEND() automatically resumes the tree. * * XXX Note that a two-pass approach with a 'veto' pass * followed by a "real thing" pass would be better, but the current * bus interface does not provide for this. */ if (DEVICE_SUSPEND(root_bus) != 0) { device_printf(sc->acpi_dev, "device_suspend failed\n"); goto backout; } slp_state = ACPI_SS_DEV_SUSPEND; status = AcpiEnterSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); goto backout; } slp_state = ACPI_SS_SLP_PREP; if (sc->acpi_sleep_delay > 0) DELAY(sc->acpi_sleep_delay * 1000000); suspendclock(); intr = intr_disable(); if (state != ACPI_STATE_S1) { sleep_result = acpi_sleep_machdep(sc, state); acpi_wakeup_machdep(sc, state, sleep_result, 0); /* * XXX According to the ACPI specification, the SCI_EN bit should be * restored by the ACPI platform (BIOS, firmware) to its pre-sleep state. * Unfortunately some BIOSes fail to do that and that leads to * unexpected and serious consequences during wake up, like a system * getting stuck in SMI handlers. * This hack is picked up from Linux, which claims that it follows * Windows behavior. */ if (sleep_result == 1 && state != ACPI_STATE_S4) AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT); if (sleep_result == 1 && state == ACPI_STATE_S3) { /* * Prevent misinterpretation of the wakeup by the power button * as a request for power off. * Ideally we should post an appropriate wakeup event, * perhaps using acpi_event_power_button_wake or the like. * * Clearing of the power button status after wakeup is mandated * by the ACPI specification in section "Fixed Power Button". * * XXX As of ACPICA 20121114 AcpiGetEventStatus provides * status as 0/1 corresponding to inactive/active despite * its type being ACPI_EVENT_STATUS. In other words, * we should not test for ACPI_EVENT_FLAG_SET for the time being. */ if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON, &power_button_status)) && power_button_status != 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); device_printf(sc->acpi_dev, "cleared fixed power button status\n"); } } intr_restore(intr); /* Call acpi_wakeup_machdep() again with interrupts enabled. */ acpi_wakeup_machdep(sc, state, sleep_result, 1); AcpiLeaveSleepStatePrep(state); if (sleep_result == -1) goto backout; /* Re-enable ACPI hardware on wakeup from sleep state 4. */ if (state == ACPI_STATE_S4) AcpiEnable(); } else { status = AcpiEnterSleepState(state); intr_restore(intr); AcpiLeaveSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); goto backout; } } slp_state = ACPI_SS_SLEPT; /* * Back out state according to how far along we got in the suspend * process.
This handles both the error and success cases. */ backout: if (slp_state >= ACPI_SS_SLP_PREP) resumeclock(); if (slp_state >= ACPI_SS_GPE_SET) { acpi_wake_prep_walk(state); sc->acpi_sstate = ACPI_STATE_S0; } if (slp_state >= ACPI_SS_DEV_SUSPEND) DEVICE_RESUME(root_bus); if (slp_state >= ACPI_SS_SLP_PREP) AcpiLeaveSleepState(state); if (slp_state >= ACPI_SS_SLEPT) { #if defined(__i386__) || defined(__amd64__) /* NB: we are still using ACPI timecounter at this point. */ resume_TSC(); #endif acpi_resync_clock(sc); acpi_enable_fixed_events(sc); } sc->acpi_next_sstate = 0; bus_topo_unlock(); #ifdef EARLY_AP_STARTUP thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif resume_all_fs(); resume_all_proc(); EVENTHANDLER_INVOKE(power_resume); /* Allow another sleep request after a while. */ callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); /* Run /etc/rc.resume after we are back. */ if (devctl_process_running()) acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); return_ACPI_STATUS (status); } static void acpi_resync_clock(struct acpi_softc *sc) { /* * Warm up timecounter again and reset system clock. */ (void)timecounter->tc_get_timecount(timecounter); inittodr(time_second + sc->acpi_sleep_delay); } /* Enable or disable the device's wake GPE. */ int acpi_wake_set_enable(device_t dev, int enable) { struct acpi_prw_data prw; ACPI_STATUS status; int flags; /* Make sure the device supports waking the system and get the GPE. */ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) return (ENXIO); flags = acpi_get_flags(dev); if (enable) { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "enable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); } else { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "disable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); } return (0); } static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* Check that this is a wake-capable device and get its GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); /* * The destination sleep state must be less than (i.e., higher power) * or equal to the value specified by _PRW. If this GPE cannot be * enabled for the next sleep state, then disable it. If it can and * the user requested it be enabled, turn on any required power resources * and set _PSW. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (bootverbose) device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", acpi_name(handle), sstate); } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { acpi_pwr_wake_enable(handle, 1); acpi_SetInteger(handle, "_PSW", 1); if (bootverbose) device_printf(dev, "wake_prep enabled for %s (S%d)\n", acpi_name(handle), sstate); } return (0); } static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* * Check that this is a wake-capable device and get its GPE. Return * now if the user didn't enable this device for wake. 
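 *
 * The per-device enable checked here is the "wake" sysctl attached by
 * acpi_wake_sysctl_walk() below; e.g. (the device name is illustrative):
 *
 *	sysctl dev.acpi_button.0.wake=1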
*/ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) return (0); /* * If this GPE couldn't be enabled for the previous sleep state, it was * disabled before going to sleep so re-enable it. If it was enabled, * clear _PSW and turn off any power resources it used. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (bootverbose) device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); } else { acpi_SetInteger(handle, "_PSW", 0); acpi_pwr_wake_enable(handle, 0); if (bootverbose) device_printf(dev, "run_prep cleaned up for %s\n", acpi_name(handle)); } return (0); } static ACPI_STATUS acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { int sstate; /* If suspending, run the sleep prep function, otherwise wake. */ sstate = *(int *)context; if (AcpiGbl_SystemAwakeAndRunning) acpi_wake_sleep_prep(handle, sstate); else acpi_wake_run_prep(handle, sstate); return (AE_OK); } /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ static int acpi_wake_prep_walk(int sstate) { ACPI_HANDLE sb_handle; if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_wake_prep, NULL, &sstate, NULL); return (0); } /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ static int acpi_wake_sysctl_walk(device_t dev) { int error, i, numdevs; device_t *devlist; device_t child; ACPI_STATUS status; error = device_get_children(dev, &devlist, &numdevs); if (error != 0 || numdevs == 0) { if (numdevs == 0) free(devlist, M_TEMP); return (error); } for (i = 0; i < numdevs; i++) { child = devlist[i]; acpi_wake_sysctl_walk(child); if (!device_is_attached(child)) continue; status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); if (ACPI_SUCCESS(status)) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, "wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0, acpi_wake_set_sysctl, "I", "Device set to wake the system"); } } free(devlist, M_TEMP); return (0); } /* Enable or disable wake from userland. */ static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) { int enable, error; device_t dev; dev = (device_t)arg1; enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; error = sysctl_handle_int(oidp, &enable, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enable != 0 && enable != 1) return (EINVAL); return (acpi_wake_set_enable(dev, enable)); } /* Parse a device's _PRW into a structure. */ int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) { ACPI_STATUS status; ACPI_BUFFER prw_buffer; ACPI_OBJECT *res, *res2; int error, i, power_count; if (h == NULL || prw == NULL) return (EINVAL); /* * The _PRW object (7.2.9) is only required for devices that have the * ability to wake the system from a sleeping state. */ error = EINVAL; prw_buffer.Pointer = NULL; prw_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); if (ACPI_FAILURE(status)) return (ENOENT); res = (ACPI_OBJECT *)prw_buffer.Pointer; if (res == NULL) return (ENOENT); if (!ACPI_PKG_VALID(res, 2)) goto out; /* * Element 1 of the _PRW object: * The lowest power system sleeping state that can be entered while still * providing wake functionality. 
The sleeping state being entered must * be less than (i.e., higher power) or equal to this value. */ if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) goto out; /* * Element 0 of the _PRW object: */ switch (res->Package.Elements[0].Type) { case ACPI_TYPE_INTEGER: /* * If the data type of this package element is numeric, then this * _PRW package element is the bit index in the GPEx_EN, in the * GPE blocks described in the FADT, of the enable bit that is * enabled for the wake event. */ prw->gpe_handle = NULL; prw->gpe_bit = res->Package.Elements[0].Integer.Value; error = 0; break; case ACPI_TYPE_PACKAGE: /* * If the data type of this package element is a package, then this * _PRW package element is itself a package containing two * elements. The first is an object reference to the GPE Block * device that contains the GPE that will be triggered by the wake * event. The second element is numeric and it contains the bit * index in the GPEx_EN, in the GPE Block referenced by the * first element in the package, of the enable bit that is enabled for * the wake event. * * For example, if this field is a package then it is of the form: * Package() {\_SB.PCI0.ISA.GPE, 2} */ res2 = &res->Package.Elements[0]; if (!ACPI_PKG_VALID(res2, 2)) goto out; prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); if (prw->gpe_handle == NULL) goto out; if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) goto out; error = 0; break; default: goto out; } /* Elements 2 to N of the _PRW object are power resources. */ power_count = res->Package.Count - 2; if (power_count > ACPI_PRW_MAX_POWERRES) { printf("ACPI device %s has too many power resources\n", acpi_name(h)); power_count = 0; } prw->power_res_count = power_count; for (i = 0; i < power_count; i++) prw->power_res[i] = res->Package.Elements[i]; out: if (prw_buffer.Pointer != NULL) AcpiOsFree(prw_buffer.Pointer); return (error); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { struct acpi_softc *sc = (struct acpi_softc *)arg; int ret; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Check if button action is disabled or unknown. */ if (state == ACPI_STATE_UNKNOWN) return; /* Request that the system prepare to enter the given suspend state. */ ret = acpi_ReqSleepState(sc, state); if (ret != 0) device_printf(sc->acpi_dev, "request to enter state S%d failed (err %d)\n", state, ret); return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Currently, nothing to do for wakeup. 
*/ return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ static void acpi_invoke_sleep_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context); } static void acpi_invoke_wake_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context); } UINT32 acpi_event_power_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_power_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } /* * XXX This static buffer is suboptimal. There is no locking so only * use this for single-threaded callers. */ char * acpi_name(ACPI_HANDLE handle) { ACPI_BUFFER buf; static char data[256]; buf.Length = sizeof(data); buf.Pointer = data; if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) return (data); return ("(unknown)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. */ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *env, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((env = kern_getenv("debug.acpi.avoid")) == NULL) return (0); /* Scan the avoid list checking for a match */ cp = env; for (;;) { while (*cp != 0 && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while (cp[len] != 0 && !isspace(cp[len])) len++; if (!strncmp(cp, np, len)) { freeenv(env); return(1); } cp += len; } freeenv(env); return (0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp, *env; int len; if ((env = kern_getenv("debug.acpi.disabled")) == NULL) return (0); if (strcmp(env, "all") == 0) { freeenv(env); return (1); } /* Scan the disable list, checking for a match. */ cp = env; for (;;) { while (*cp != '\0' && isspace(*cp)) cp++; if (*cp == '\0') break; len = 0; while (cp[len] != '\0' && !isspace(cp[len])) len++; if (strncmp(cp, subsys, len) == 0) { freeenv(env); return (1); } cp += len; } freeenv(env); return (0); } static void acpi_lookup(void *arg, const char *name, device_t *dev) { ACPI_HANDLE handle; if (*dev != NULL) return; /* * Allow any handle name that is specified as an absolute path and * starts with '\'. 
We could restrict this to \_SB and friends, * but see acpi_probe_children() for notes on why we scan the entire * namespace for devices. * * XXX: The pathname argument to AcpiGetHandle() should be fixed to * be const. */ if (name[0] != '\\') return; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name), &handle))) return; *dev = acpi_get_device(handle); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the * register/deregister interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; acpi_ioctl_fn fn; void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return (ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); ACPI_UNLOCK(acpi); return (0); } void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) { struct acpi_ioctl_hook *hp; ACPI_LOCK(acpi); TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if (hp->cmd == cmd && hp->fn == fn) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } ACPI_UNLOCK(acpi); } static int acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, state; error = 0; hp = NULL; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted) TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) break; } ACPI_UNLOCK(acpi); if (hp) return (hp->fn(cmd, addr, hp->arg)); /* * Core ioctls are not permitted for non-writable user. * Currently, other ioctls just fetch information. * Not changing system behavior. */ if ((flag & FWRITE) == 0) return (EPERM); /* Core system ioctls. 
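 *
 * For example, a suspend-to-RAM request from userland arrives here as
 * (sketch; "fd" is an open /dev/acpi descriptor and is an assumption):
 *
 *	int state = ACPI_STATE_S3;
 *
 *	ioctl(fd, ACPIIO_REQSLPSTATE, &state);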
*/ switch (cmd) { case ACPIIO_REQSLPSTATE: state = *(int *)addr; if (state != ACPI_STATE_S5) return (acpi_ReqSleepState(sc, state)); device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n"); error = EOPNOTSUPP; break; case ACPIIO_ACKSLPSTATE: error = *(int *)addr; error = acpi_AckSleepState(sc->acpi_clone, error); break; case ACPIIO_SETSLPSTATE: /* DEPRECATED */ state = *(int *)addr; if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); if (ACPI_FAILURE(acpi_SetSleepState(sc, state))) error = ENXIO; break; default: error = ENXIO; break; } return (error); } static int acpi_sname2sstate(const char *sname) { int sstate; if (toupper(sname[0]) == 'S') { sstate = sname[1] - '0'; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 && sname[2] == '\0') return (sstate); } else if (strcasecmp(sname, "NONE") == 0) return (ACPI_STATE_UNKNOWN); return (-1); } static const char * acpi_sstate2sname(int sstate) { static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" }; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5) return (snames[sstate]); else if (sstate == ACPI_STATE_UNKNOWN) return ("NONE"); return (NULL); } static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { int error; struct sbuf sb; UINT8 state; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (acpi_sleep_states[state]) sbuf_printf(&sb, "%s ", acpi_sstate2sname(state)); sbuf_trim(&sb); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error, new_state, old_state; old_state = *(int *)oidp->oid_arg1; strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state)); error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { new_state = acpi_sname2sstate(sleep_state); if (new_state < ACPI_STATE_S1) return (EINVAL); if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state]) return (EOPNOTSUPP); if (new_state != old_state) *(int *)oidp->oid_arg1 = new_state; } return (error); } /* Inform devctl(4) when we receive a Notify. */ void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) { char notify_buf[16]; ACPI_BUFFER handle_buf; ACPI_STATUS status; if (subsystem == NULL) return; handle_buf.Pointer = NULL; handle_buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiNsHandleToPathname(h, &handle_buf, FALSE); if (ACPI_FAILURE(status)) return; snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); AcpiOsFree(handle_buf.Pointer); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. 
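 *
 * For example, in loader.conf (names come from the tables below; the
 * particular mix is illustrative):
 *
 *	debug.acpi.layer="ACPI_ALL_COMPONENTS !ACPI_PARSER"
 *	debug.acpi.level="ACPI_LV_INIT ACPI_LV_INFO"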
*/ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUS", ACPI_BUS}, {"ACPI_BUTTON", ACPI_BUTTON}, {"ACPI_EC", ACPI_EC}, {"ACPI_FAN", ACPI_FAN}, {"ACPI_POWERRES", ACPI_POWERRES}, {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, {"ACPI_TIMER", ACPI_TIMER}, {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, {NULL, 0} }; static struct debugtag dbg_level[] = { {"ACPI_LV_INIT", ACPI_LV_INIT}, {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, {"ACPI_LV_INFO", ACPI_LV_INFO}, {"ACPI_LV_REPAIR", ACPI_LV_REPAIR}, {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, /* Trace verbosity level 1 [Standard Trace Level] */ {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, {"ACPI_LV_PARSE", ACPI_LV_PARSE}, {"ACPI_LV_LOAD", ACPI_LV_LOAD}, {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, {"ACPI_LV_EXEC", ACPI_LV_EXEC}, {"ACPI_LV_NAMES", ACPI_LV_NAMES}, {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, {"ACPI_LV_TABLES", ACPI_LV_TABLES}, {"ACPI_LV_VALUES", ACPI_LV_VALUES}, {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, /* Trace verbosity level 2 [Function tracing and memory allocation] */ {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, {"ACPI_LV_ALL", ACPI_LV_ALL}, /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, {"ACPI_LV_THREADS", ACPI_LV_THREADS}, {"ACPI_LV_IO", ACPI_LV_IO}, {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, /* Exceptionally verbose output -- also used in the global "DebugLevel" */ {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) *flag |= tag[i].value; else *flag &= ~tag[i].value; } } cp = ep; } } static void acpi_set_debugging(void *junk) { char *layer, *level; if (cold) { AcpiDbgLayer = 0; AcpiDbgLevel = 0; } layer = kern_getenv("debug.acpi.layer"); level = kern_getenv("debug.acpi.level"); if (layer == NULL && level == NULL) return; printf("ACPI set debug"); if (layer != NULL) { if (strcmp("NONE", layer) != 0) printf(" layer '%s'", layer); acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); freeenv(layer); } if (level != NULL) { if (strcmp("NONE", level) != 0) 
printf(" level '%s'", level); acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); freeenv(level); } printf("\n"); } SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, NULL); static int acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) { int error, *dbg; struct debugtag *tag; struct sbuf sb; char temp[128]; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) return (ENOMEM); if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { tag = &dbg_layer[0]; dbg = &AcpiDbgLayer; } else { tag = &dbg_level[0]; dbg = &AcpiDbgLevel; } /* Get old values if this is a get request. */ ACPI_SERIAL_BEGIN(acpi); if (*dbg == 0) { sbuf_cpy(&sb, "NONE"); } else if (req->newptr == NULL) { for (; tag->name != NULL; tag++) { if ((*dbg & tag->value) == tag->value) sbuf_printf(&sb, "%s ", tag->name); } } sbuf_trim(&sb); sbuf_finish(&sb); strlcpy(temp, sbuf_data(&sb), sizeof(temp)); sbuf_delete(&sb); error = sysctl_handle_string(oidp, temp, sizeof(temp), req); /* Check for error or no change */ if (error == 0 && req->newptr != NULL) { *dbg = 0; kern_setenv((char *)oidp->oid_arg1, temp); acpi_set_debugging(NULL); } ACPI_SERIAL_END(acpi); return (error); } SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); #endif /* ACPI_DEBUG */ static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS) { int error; int old; old = acpi_debug_objects; error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (old == acpi_debug_objects || (old && acpi_debug_objects)) return (0); ACPI_SERIAL_BEGIN(acpi); AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? 
TRUE : FALSE; ACPI_SERIAL_END(acpi); return (0); } static int acpi_parse_interfaces(char *str, struct acpi_interface *iface) { char *p; size_t len; int i, j; p = str; while (isspace(*p) || *p == ',') p++; len = strlen(p); if (len == 0) return (0); p = strdup(p, M_TEMP); for (i = 0; i < len; i++) if (p[i] == ',') p[i] = '\0'; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { i += strlen(p + i) + 1; j++; } if (j == 0) { free(p, M_TEMP); return (0); } iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK); iface->num = j; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { iface->data[j] = p + i; i += strlen(p + i) + 1; j++; } return (j); } static void acpi_free_interfaces(struct acpi_interface *iface) { free(iface->data[0], M_TEMP); free(iface->data, M_TEMP); } static void acpi_reset_interfaces(device_t dev) { struct acpi_interface list; ACPI_STATUS status; int i; if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiInstallInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to install _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "installed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiRemoveInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to remove _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "removed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } } static int acpi_pm_func(u_long cmd, void *arg, ...) { int state, acpi_state; int error; struct acpi_softc *sc; va_list ap; error = 0; switch (cmd) { case POWER_CMD_SUSPEND: sc = (struct acpi_softc *)arg; if (sc == NULL) { error = EINVAL; goto out; } va_start(ap, arg); state = va_arg(ap, int); va_end(ap); switch (state) { case POWER_SLEEP_STATE_STANDBY: acpi_state = sc->acpi_standby_sx; break; case POWER_SLEEP_STATE_SUSPEND: acpi_state = sc->acpi_suspend_sx; break; case POWER_SLEEP_STATE_HIBERNATE: acpi_state = ACPI_STATE_S4; break; default: error = EINVAL; goto out; } if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) error = ENXIO; break; default: error = EINVAL; goto out; } out: return (error); } static void acpi_pm_register(void *arg) { if (!cold || resource_disabled("acpi", 0)) return; power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); } SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL); diff --git a/sys/dev/acpica/acpi_pcib_acpi.c b/sys/dev/acpica/acpi_pcib_acpi.c index 451a8d8b736d..235670076dae 100644 --- a/sys/dev/acpica/acpi_pcib_acpi.c +++ b/sys/dev/acpica/acpi_pcib_acpi.c @@ -1,833 +1,832 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_acpi.h" #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include /* Hooks for the ACPI CA debugging infrastructure. */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI_ACPI") struct acpi_hpcib_softc { device_t ap_dev; ACPI_HANDLE ap_handle; bus_dma_tag_t ap_dma_tag; int ap_flags; uint32_t ap_osc_ctl; int ap_segment; /* PCI domain */ int ap_bus; /* bios-assigned bus number */ int ap_addr; /* device/func of PCI-Host bridge */ ACPI_BUFFER ap_prt; /* interrupt routing table */ #ifdef NEW_PCIB struct pcib_host_resources ap_host_res; #endif }; static int acpi_pcib_acpi_probe(device_t bus); static int acpi_pcib_acpi_attach(device_t bus); static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static uint32_t acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes); static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin); static int acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); static int acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); static int acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq); static struct resource *acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); #ifdef NEW_PCIB static int acpi_pcib_acpi_adjust_resource(device_t dev, - device_t child, int type, struct resource *r, + device_t child, struct resource *r, rman_res_t start, rman_res_t end); #ifdef PCI_RES_BUS static int acpi_pcib_acpi_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); static int acpi_pcib_acpi_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); static int acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); #endif #endif static int acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature); static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child); static device_method_t acpi_pcib_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pcib_acpi_probe), DEVMETHOD(device_attach, acpi_pcib_acpi_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus 
interface */ DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, acpi_pcib_acpi_alloc_resource), #ifdef NEW_PCIB DEVMETHOD(bus_adjust_resource, acpi_pcib_acpi_adjust_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), #endif #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_release_resource, acpi_pcib_acpi_release_resource), DEVMETHOD(bus_activate_resource, acpi_pcib_acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_pcib_acpi_deactivate_resource), #else DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), #endif DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, acpi_pcib_get_cpus), DEVMETHOD(bus_get_dma_tag, acpi_pcib_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, acpi_pcib_read_config), DEVMETHOD(pcib_write_config, acpi_pcib_write_config), DEVMETHOD(pcib_route_interrupt, acpi_pcib_acpi_route_interrupt), DEVMETHOD(pcib_alloc_msi, acpi_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, acpi_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, acpi_pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, acpi_pcib_power_for_sleep), DEVMETHOD(pcib_request_feature, acpi_pcib_request_feature), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, acpi_pcib_acpi_driver, acpi_pcib_acpi_methods, sizeof(struct acpi_hpcib_softc)); DRIVER_MODULE(acpi_pcib, acpi, acpi_pcib_acpi_driver, 0, 0); MODULE_DEPEND(acpi_pcib, acpi, 1, 1, 1); static int acpi_pcib_acpi_probe(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; int root; if (acpi_disabled("pcib") || (h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ENXIO); root = (devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0; AcpiOsFree(devinfo); if (!root || pci_cfgregopen() == 0) return (ENXIO); device_set_desc(dev, "ACPI Host-PCI bridge"); return (0); } #ifdef NEW_PCIB static ACPI_STATUS acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context) { struct acpi_hpcib_softc *sc; UINT64 length, min, max; u_int flags; int error, type; sc = context; switch (res->Type) { case ACPI_RESOURCE_TYPE_START_DEPENDENT: case ACPI_RESOURCE_TYPE_END_DEPENDENT: panic("host bridge has dependent resources"); case ACPI_RESOURCE_TYPE_ADDRESS16: case ACPI_RESOURCE_TYPE_ADDRESS32: case ACPI_RESOURCE_TYPE_ADDRESS64: case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: if (res->Data.Address.ProducerConsumer != ACPI_PRODUCER) break; switch (res->Type) { case ACPI_RESOURCE_TYPE_ADDRESS16: min = res->Data.Address16.Address.Minimum; max = res->Data.Address16.Address.Maximum; length = res->Data.Address16.Address.AddressLength; break; case ACPI_RESOURCE_TYPE_ADDRESS32: min = res->Data.Address32.Address.Minimum; max = res->Data.Address32.Address.Maximum; length = res->Data.Address32.Address.AddressLength; break; case ACPI_RESOURCE_TYPE_ADDRESS64: min = res->Data.Address64.Address.Minimum; max = res->Data.Address64.Address.Maximum; length = res->Data.Address64.Address.AddressLength; break; default: KASSERT(res->Type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64, ("should never happen")); min = res->Data.ExtAddress64.Address.Minimum; max = res->Data.ExtAddress64.Address.Maximum; length = 
res->Data.ExtAddress64.Address.AddressLength; break; } if (length == 0) break; if (min + length - 1 != max && (res->Data.Address.MinAddressFixed != ACPI_ADDRESS_FIXED || res->Data.Address.MaxAddressFixed != ACPI_ADDRESS_FIXED)) break; flags = 0; switch (res->Data.Address.ResourceType) { case ACPI_MEMORY_RANGE: type = SYS_RES_MEMORY; if (res->Type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) { if (res->Data.Address.Info.Mem.Caching == ACPI_PREFETCHABLE_MEMORY) flags |= RF_PREFETCHABLE; } else { /* * XXX: Parse prefetch flag out of * TypeSpecific. */ } break; case ACPI_IO_RANGE: type = SYS_RES_IOPORT; break; #ifdef PCI_RES_BUS case ACPI_BUS_NUMBER_RANGE: type = PCI_RES_BUS; break; #endif default: return (AE_OK); } if (min + length - 1 != max) device_printf(sc->ap_dev, "Length mismatch for %d range: %jx vs %jx\n", type, (uintmax_t)(max - min + 1), (uintmax_t)length); #ifdef __i386__ if (min > ULONG_MAX) { device_printf(sc->ap_dev, "Ignoring %d range above 4GB (%#jx-%#jx)\n", type, (uintmax_t)min, (uintmax_t)max); break; } if (max > ULONG_MAX) { device_printf(sc->ap_dev, "Truncating end of %d range above 4GB (%#jx-%#jx)\n", type, (uintmax_t)min, (uintmax_t)max); max = ULONG_MAX; } #endif error = pcib_host_res_decodes(&sc->ap_host_res, type, min, max, flags); if (error) panic("Failed to manage %d range (%#jx-%#jx): %d", type, (uintmax_t)min, (uintmax_t)max, error); break; default: break; } return (AE_OK); } #endif #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static bool get_decoded_bus_range(struct acpi_hpcib_softc *sc, rman_res_t *startp, rman_res_t *endp) { struct resource_list_entry *rle; rle = resource_list_find(&sc->ap_host_res.hr_rl, PCI_RES_BUS, 0); if (rle == NULL) return (false); *startp = rle->start; *endp = rle->end; return (true); } #endif static int acpi_pcib_osc(struct acpi_hpcib_softc *sc, uint32_t osc_ctl) { ACPI_STATUS status; uint32_t cap_set[3]; static uint8_t pci_host_bridge_uuid[ACPI_UUID_LENGTH] = { 0x5b, 0x4d, 0xdb, 0x33, 0xf7, 0x1f, 0x1c, 0x40, 0x96, 0x57, 0x74, 0x41, 0xc0, 0x3d, 0xd7, 0x66 }; /* * Don't invoke _OSC if a control is already granted. * However, always invoke _OSC during attach when 0 is passed. */ if (osc_ctl != 0 && (sc->ap_osc_ctl & osc_ctl) == osc_ctl) return (0); /* Support Field: Extended PCI Config Space, PCI Segment Groups, MSI */ cap_set[PCI_OSC_SUPPORT] = PCIM_OSC_SUPPORT_EXT_PCI_CONF | PCIM_OSC_SUPPORT_SEG_GROUP | PCIM_OSC_SUPPORT_MSI; /* Active State Power Management, Clock Power Management Capability */ if (pci_enable_aspm) cap_set[PCI_OSC_SUPPORT] |= PCIM_OSC_SUPPORT_ASPM | PCIM_OSC_SUPPORT_CPMC; /* Control Field */ cap_set[PCI_OSC_CTL] = sc->ap_osc_ctl | osc_ctl; status = acpi_EvaluateOSC(sc->ap_handle, pci_host_bridge_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) { sc->ap_osc_ctl |= osc_ctl; return (0); } device_printf(sc->ap_dev, "_OSC failed: %s\n", AcpiFormatException(status)); return (EIO); } /* * _OSC may return an error in the status word, but will * update the control mask always. _OSC should not revoke * previously-granted controls. 
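 *
 * For reference, a sketch of the _OSC Arg3 capabilities buffer
 * evaluated above, one DWORD per field as indexed in this file:
 * cap_set[0] holds the status/error word returned by _OSC,
 * cap_set[PCI_OSC_SUPPORT] the features the OS supports, and
 * cap_set[PCI_OSC_CTL] the controls requested by the OS on entry
 * and granted to it on return.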
*/ if ((cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) != sc->ap_osc_ctl) device_printf(sc->ap_dev, "_OSC revoked %#x\n", (cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) ^ sc->ap_osc_ctl); sc->ap_osc_ctl = cap_set[PCI_OSC_CTL]; if ((sc->ap_osc_ctl & osc_ctl) != osc_ctl) return (EIO); return (0); } static int acpi_pcib_acpi_attach(device_t dev) { struct acpi_hpcib_softc *sc; ACPI_STATUS status; static int bus0_seen = 0; u_int slot, func, busok; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct resource *bus_res; rman_res_t end, start; int rid; #endif int error, domain; uint8_t busno; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->ap_dev = dev; sc->ap_handle = acpi_get_handle(dev); /* * Don't attach if we're not really there. */ if (!acpi_DeviceIsPresent(dev)) return (ENXIO); acpi_pcib_osc(sc, 0); /* * Get our segment number by evaluating _SEG. * It's OK for this to not exist. */ status = acpi_GetInteger(sc->ap_handle, "_SEG", &sc->ap_segment); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _SEG - %s\n", AcpiFormatException(status)); return_VALUE (ENXIO); } /* If it's not found, assume 0. */ sc->ap_segment = 0; } /* * Get the address (device and function) of the associated * PCI-Host bridge device from _ADR. Assume we don't have one if * it doesn't exist. */ status = acpi_GetInteger(sc->ap_handle, "_ADR", &sc->ap_addr); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) device_printf(dev, "could not evaluate _ADR - %s\n", AcpiFormatException(status)); sc->ap_addr = -1; } #ifdef NEW_PCIB /* * Determine which address ranges this bridge decodes and setup * resource managers for those ranges. */ if (pcib_host_res_init(sc->ap_dev, &sc->ap_host_res) != 0) panic("failed to init hostb resources"); if (!acpi_disabled("hostres")) { status = AcpiWalkResources(sc->ap_handle, "_CRS", acpi_pcib_producer_handler, sc); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) device_printf(sc->ap_dev, "failed to parse resources: %s\n", AcpiFormatException(status)); } #endif /* * Get our base bus number by evaluating _BBN. * If this doesn't work, we assume we're bus number 0. * * XXX note that it may also not exist in the case where we are * meant to use a private configuration space mechanism for this bus, * so we should dig out our resources and check to see if we have * anything like that. How do we do this? * XXX If we have the requisite information, and if we don't think the * default PCI configuration space handlers can deal with this bus, * we should attach our own handler. * XXX invoke _REG on this for the PCI config space address space? * XXX It seems many BIOS's with multiple Host-PCI bridges do not set * _BBN correctly. They set _BBN to zero for all bridges. Thus, * if _BBN is zero and PCI bus 0 already exists, we try to read our * bus number from the configuration registers at address _ADR. * We only do this for domain/segment 0 in the hopes that this is * only needed for old single-domain machines. */ status = acpi_GetInteger(sc->ap_handle, "_BBN", &sc->ap_bus); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _BBN - %s\n", AcpiFormatException(status)); return (ENXIO); } else { /* If it's not found, assume 0. */ sc->ap_bus = 0; } } /* * If this is segment 0, the bus is zero, and PCI bus 0 already * exists, read the bus number via PCI config space. */ busok = 1; if (sc->ap_segment == 0 && sc->ap_bus == 0 && bus0_seen) { busok = 0; if (sc->ap_addr != -1) { /* XXX: We assume bus 0. 
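 * For reference, a PCI _ADR value packs (device << 16) | function,
 * so the ACPI_ADR_PCI_SLOT() and ACPI_ADR_PCI_FUNC() macros used
 * below simply split the two halves back apart.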
*/ slot = ACPI_ADR_PCI_SLOT(sc->ap_addr); func = ACPI_ADR_PCI_FUNC(sc->ap_addr); if (bootverbose) device_printf(dev, "reading config registers from 0:%d:%d\n", slot, func); if (host_pcib_get_busno(pci_cfgregread, 0, slot, func, &busno) == 0) device_printf(dev, "couldn't read bus number from cfg space\n"); else { sc->ap_bus = busno; busok = 1; } } } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) /* * If nothing else worked, hope that ACPI at least lays out the * Host-PCI bridges in order and that as a result the next free * bus number is our bus number. */ if (busok == 0) { /* * If we have a region of bus numbers, use the first * number for our bus. */ if (get_decoded_bus_range(sc, &start, &end)) sc->ap_bus = start; else { rid = 0; bus_res = pci_domain_alloc_bus(sc->ap_segment, dev, &rid, 0, PCI_BUSMAX, 1, 0); if (bus_res == NULL) { device_printf(dev, "could not allocate bus number\n"); pcib_host_res_free(dev, &sc->ap_host_res); return (ENXIO); } sc->ap_bus = rman_get_start(bus_res); pci_domain_release_bus(sc->ap_segment, dev, rid, bus_res); } } else { /* * If there is a decoded bus range, assume the bus number is * the first value in the range. Warn if _BBN doesn't match. */ if (get_decoded_bus_range(sc, &start, &end)) { if (sc->ap_bus != start) { device_printf(dev, "WARNING: BIOS configured bus number (%d) is " "not within decoded bus number range " "(%ju - %ju).\n", sc->ap_bus, (uintmax_t)start, (uintmax_t)end); device_printf(dev, "Using range start (%ju) as bus number.\n", (uintmax_t)start); sc->ap_bus = start; } } } #else /* * If nothing else worked, hope that ACPI at least lays out the * host-PCI bridges in order and that as a result our unit number * is actually our bus number. There are several reasons this * might not be true. */ if (busok == 0) { sc->ap_bus = device_get_unit(dev); device_printf(dev, "trying bus number %d\n", sc->ap_bus); } #endif /* If this is bus 0 on segment 0, note that it has been seen already. */ if (sc->ap_segment == 0 && sc->ap_bus == 0) bus0_seen = 1; acpi_pcib_fetch_prt(dev, &sc->ap_prt); error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->ap_dma_tag); if (error != 0) goto errout; error = bus_get_domain(dev, &domain); if (error == 0) error = bus_dma_tag_set_domain(sc->ap_dma_tag, domain); /* Don't fail to attach if the domain can't be queried or set. */ error = 0; bus_generic_probe(dev); if (device_add_child(dev, "pci", -1) == NULL) { bus_dma_tag_destroy(sc->ap_dma_tag); sc->ap_dma_tag = NULL; error = ENXIO; goto errout; } return (bus_generic_attach(dev)); errout: device_printf(device_get_parent(dev), "couldn't attach pci bus\n"); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) pcib_host_res_free(dev, &sc->ap_host_res); #endif return (error); } /* * Support for standard PCI bridge ivars. 
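 *
 * (These handlers are normally reached through bus accessor macros;
 * e.g. a pcib_get_bus(dev)-style accessor expands to BUS_READ_IVAR()
 * with PCIB_IVAR_BUS and lands in acpi_pcib_read_ivar() below.)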
*/ static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->ap_segment; return (0); case PCIB_IVAR_BUS: *result = sc->ap_bus; return (0); case ACPI_IVAR_HANDLE: *result = (uintptr_t)sc->ap_handle; return (0); case ACPI_IVAR_FLAGS: *result = (uintptr_t)sc->ap_flags; return (0); } return (ENOENT); } static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: sc->ap_bus = value; return (0); case ACPI_IVAR_HANDLE: sc->ap_handle = (ACPI_HANDLE)value; return (0); case ACPI_IVAR_FLAGS: sc->ap_flags = (int)value; return (0); } return (ENOENT); } static uint32_t acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct acpi_hpcib_softc *sc = device_get_softc(dev); return (pci_cfgregread(sc->ap_segment, bus, slot, func, reg, bytes)); } static void acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct acpi_hpcib_softc *sc = device_get_softc(dev); pci_cfgregwrite(sc->ap_segment, bus, slot, func, reg, data, bytes); } static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin) { struct acpi_hpcib_softc *sc = device_get_softc(pcib); return (acpi_pcib_route_interrupt(pcib, dev, pin, &sc->ap_prt)); } static int acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } static int acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } static int acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { struct acpi_hpcib_softc *sc; device_t bus, hostb; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); sc = device_get_softc(pcib); if (sc->ap_addr == -1) return (0); /* XXX: Assumes all bridges are on bus 0. */ hostb = pci_find_dbsf(sc->ap_segment, 0, ACPI_ADR_PCI_SLOT(sc->ap_addr), ACPI_ADR_PCI_FUNC(sc->ap_addr)); if (hostb != NULL) pci_ht_map_msi(hostb, *addr); return (0); } struct resource * acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef NEW_PCIB struct acpi_hpcib_softc *sc; struct resource *res; #endif #if defined(__i386__) || defined(__amd64__) start = hostb_alloc_start(type, start, end, count); #endif #ifdef NEW_PCIB sc = device_get_softc(dev); #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end, count, flags)); #endif res = pcib_host_res_alloc(&sc->ap_host_res, child, type, rid, start, end, count, flags); /* * XXX: If this is a request for a specific range, assume it is * correct and pass it up to the parent. What we probably want to * do long-term is explicitly trust any firmware-configured * resources during the initial bus scan on boot and then disable * this after that. 
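 *
 * (The start + count - 1 == end test below is what identifies a
 * request for a specific range: the caller left the allocation no
 * room to float within [start, end].)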
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); return (res); #else return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); #endif } #ifdef NEW_PCIB int -acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, int type, +acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); #ifdef PCI_RES_BUS - if (type == PCI_RES_BUS) + if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_adjust_bus(sc->ap_segment, child, r, start, end)); #endif - return (pcib_host_res_adjust(&sc->ap_host_res, child, type, r, start, - end)); + return (pcib_host_res_adjust(&sc->ap_host_res, child, r, start, end)); } #ifdef PCI_RES_BUS int acpi_pcib_acpi_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_release_bus(sc->ap_segment, child, rid, r)); return (bus_generic_release_resource(dev, child, type, rid, r)); } int acpi_pcib_acpi_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_activate_bus(sc->ap_segment, child, rid, r)); return (bus_generic_activate_resource(dev, child, type, rid, r)); } int acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_deactivate_bus(sc->ap_segment, child, rid, r)); return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } #endif #endif static int acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature) { uint32_t osc_ctl; struct acpi_hpcib_softc *sc; sc = device_get_softc(pcib); switch (feature) { case PCI_FEATURE_HP: osc_ctl = PCIM_OSC_CTL_PCIE_HP; break; case PCI_FEATURE_AER: osc_ctl = PCIM_OSC_CTL_PCIE_AER; break; default: return (EINVAL); } return (acpi_pcib_osc(sc, osc_ctl)); } static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child) { struct acpi_hpcib_softc *sc; sc = device_get_softc(bus); return (sc->ap_dma_tag); } diff --git a/sys/dev/bhnd/bhndb/bhndb.c b/sys/dev/bhnd/bhndb/bhndb.c index ea5230bf459b..f8a1467894d1 100644 --- a/sys/dev/bhnd/bhndb/bhndb.c +++ b/sys/dev/bhnd/bhndb/bhndb.c @@ -1,2309 +1,2309 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Abstract BHND Bridge Device Driver * * Provides generic support for bridging from a parent bus (such as PCI) to * a BHND-compatible bus (e.g. bcma or siba). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhndbvar.h" #include "bhndb_bus_if.h" #include "bhndb_hwdata.h" #include "bhndb_private.h" /* Debugging flags */ static u_long bhndb_debug = 0; TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug); enum { BHNDB_DEBUG_PRIO = 1 << 0, }; #define BHNDB_DEBUG(_type) (BHNDB_DEBUG_ ## _type & bhndb_debug) static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw); static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *r, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table); static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw); bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child); static struct rman *bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type); static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size); static int bhndb_activate_static_region( struct bhndb_softc *sc, struct bhndb_region *region, device_t child, int type, int rid, struct resource *r); static int bhndb_try_activate_resource( struct bhndb_softc *sc, device_t child, int type, int rid, struct resource *r, bool *indirect); static inline struct bhndb_dw_alloc *bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore); /** * Default bhndb(4) implementation of DEVICE_PROBE(). * * This function provides the default bhndb implementation of DEVICE_PROBE(), * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge(). 
*/ int bhndb_generic_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static void bhndb_probe_nomatch(device_t dev, device_t child) { const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> (no driver attached)\n", name); } static int bhndb_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int bhndb_child_location(device_t dev, device_t child, struct sbuf *sb) { struct bhndb_softc *sc; sc = device_get_softc(dev); sbuf_printf(sb, "base=0x%llx", (unsigned long long) sc->chipid.enum_addr); return (0); } /** * Return true if @p cores matches the @p hw specification. * * @param sc BHNDB device state. * @param cores A device table to match against. * @param ncores The number of cores in @p cores. * @param hw The hardware description to be matched against. */ static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw) { for (u_int i = 0; i < hw->num_hw_reqs; i++) { const struct bhnd_core_match *match; bool found; match = &hw->hw_reqs[i]; found = false; for (u_int d = 0; d < ncores; d++) { struct bhnd_core_info *core = &cores[d]; if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; if (!bhnd_core_matches(core, match)) continue; found = true; break; } if (!found) return (false); } return (true); } /** * Initialize the region maps and priority configuration in @p br using * the priority @p table and the set of cores enumerated by @p erom. * * @param sc The bhndb device state. * @param br The resource state to be configured. * @param erom EROM parser used to enumerate @p cores. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param table Hardware priority table to be used to determine the relative * priorities of per-core port resources. */ static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *br, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table) { const struct bhndb_hw_priority *hp; bhnd_addr_t addr; bhnd_size_t size; size_t prio_low, prio_default, prio_high; int error; /* The number of port regions per priority band that must be accessible * via dynamic register windows */ prio_low = 0; prio_default = 0; prio_high = 0; /* * Register bridge regions covering all statically mapped ports. */ for (u_int i = 0; i < ncores; i++) { const struct bhndb_regwin *regw; struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); for (regw = br->cfg->register_windows; regw->win_type != BHNDB_REGWIN_T_INVALID; regw++) { const struct bhndb_port_priority *pp; uint32_t alloc_flags; /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) continue; /* Skip non-matching cores. 
*/ if (!bhndb_regwin_match_core(regw, core)) continue; /* Fetch the base address of the mapped port */ error = bhnd_erom_lookup_core_addr(erom, &md, regw->d.core.port_type, regw->d.core.port, regw->d.core.region, NULL, &addr, &size); if (error) { /* Skip non-applicable register windows */ if (error == ENOENT) continue; return (error); } /* * Apply the register window's region offset, if any. */ if (regw->d.core.offset > size) { device_printf(sc->dev, "invalid register " "window offset %#jx for region %#jx+%#jx\n", regw->d.core.offset, addr, size); return (EINVAL); } addr += regw->d.core.offset; /* * Always defer to the register window's size. * * If the port size is smaller than the window size, * this ensures that we fully utilize register windows * larger than the referenced port. * * If the port size is larger than the window size, this * ensures that we do not directly map the allocations * within the region to a too-small window. */ size = regw->win_size; /* Fetch allocation flags from the corresponding port * priority entry, if any */ pp = bhndb_hw_priorty_find_port(table, core, regw->d.core.port_type, regw->d.core.port, regw->d.core.region); if (pp != NULL) { alloc_flags = pp->alloc_flags; } else { alloc_flags = 0; } /* * Add to the bus region list. * * The window priority for a statically mapped region is * always HIGH. */ error = bhndb_add_resource_region(br, addr, size, BHNDB_PRIORITY_HIGH, alloc_flags, regw); if (error) return (error); } } /* * Perform priority accounting and register bridge regions for all * ports defined in the priority table */ for (u_int i = 0; i < ncores; i++) { struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); /* * Skip priority accounting for cores that ... */ /* ... do not require bridge resources */ if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; /* ... do not have a priority table entry */ hp = bhndb_hw_priority_find_core(table, core); if (hp == NULL) continue; /* ... are explicitly disabled in the priority table. */ if (hp->priority == BHNDB_PRIORITY_NONE) continue; /* Determine the number of dynamic windows required and * register their bus_region entries. */ for (u_int i = 0; i < hp->num_ports; i++) { const struct bhndb_port_priority *pp; pp = &hp->ports[i]; /* Fetch the address+size of the mapped port. 
*/ error = bhnd_erom_lookup_core_addr(erom, &md, pp->type, pp->port, pp->region, NULL, &addr, &size); if (error) { /* Skip ports not defined on this device */ if (error == ENOENT) continue; return (error); } /* Skip ports with an existing static mapping */ if (bhndb_has_static_region_mapping(br, addr, size)) continue; /* Define a dynamic region for this port */ error = bhndb_add_resource_region(br, addr, size, pp->priority, pp->alloc_flags, NULL); if (error) return (error); /* Update port mapping counts */ switch (pp->priority) { case BHNDB_PRIORITY_NONE: break; case BHNDB_PRIORITY_LOW: prio_low++; break; case BHNDB_PRIORITY_DEFAULT: prio_default++; break; case BHNDB_PRIORITY_HIGH: prio_high++; break; } } } /* Determine the minimum priority at which we'll allocate direct * register windows from our dynamic pool */ size_t prio_total = prio_low + prio_default + prio_high; if (prio_total <= br->dwa_count) { /* low+default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_LOW; } else if (prio_default + prio_high <= br->dwa_count) { /* default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_DEFAULT; } else { /* high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_HIGH; } if (BHNDB_DEBUG(PRIO)) { struct bhndb_region *region; const char *direct_msg, *type_msg; bhndb_priority_t prio, prio_min; uint32_t flags; prio_min = br->min_prio; device_printf(sc->dev, "min_prio: %d\n", prio_min); STAILQ_FOREACH(region, &br->bus_regions, link) { prio = region->priority; flags = region->alloc_flags; direct_msg = prio >= prio_min ? "direct" : "indirect"; type_msg = region->static_regwin ? "static" : "dynamic"; device_printf(sc->dev, "region 0x%llx+0x%llx priority " "%u %s/%s", (unsigned long long) region->addr, (unsigned long long) region->size, region->priority, direct_msg, type_msg); if (flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) printf(" [overcommit]\n"); else printf("\n"); } } return (0); } /** * Find a hardware specification for @p dev. * * @param sc The bhndb device state. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param[out] hw On success, the matched hardware specification * associated with @p dev. * * @retval 0 success * @retval non-zero if an error occurs fetching device info for comparison. */ static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw) { const struct bhndb_hw *next, *hw_table; /* Search for the first matching hardware config. */ hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev); for (next = hw_table; next->hw_reqs != NULL; next++) { if (!bhndb_hw_matches(sc, cores, ncores, next)) continue; /* Found */ *hw = next; return (0); } return (ENOENT); } /** * Helper function that must be called by subclass bhndb(4) drivers * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4) * APIs on the bridge device. * * This function will add a bridged bhnd(4) child device with a device order of * BHND_PROBE_BUS. Any subclass bhndb(4) driver may use the BHND_PROBE_* * priority bands to add additional devices that will be attached in * their preferred order relative to the bridged bhnd(4) bus. * * @param dev The bridge device to attach. * @param cid The bridged device's chip identification. * @param cores The bridged device's core table. * @param ncores The number of cores in @p cores. * @param bridge_core Core info for the bhnd(4) core serving as the host * bridge. 
* @param erom_class An erom parser class that may be used to parse * the bridged device's device enumeration table. */ int bhndb_attach(device_t dev, struct bhnd_chipid *cid, struct bhnd_core_info *cores, u_int ncores, struct bhnd_core_info *bridge_core, bhnd_erom_class_t *erom_class) { struct bhndb_devinfo *dinfo; struct bhndb_softc *sc; const struct bhndb_hw *hw; const struct bhndb_hwcfg *hwcfg; const struct bhndb_hw_priority *hwprio; struct bhnd_erom_io *eio; bhnd_erom_t *erom; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent_dev = device_get_parent(dev); sc->bridge_core = *bridge_core; sc->chipid = *cid; if ((error = bhnd_service_registry_init(&sc->services))) return (error); BHNDB_LOCK_INIT(sc); erom = NULL; /* Find a matching bridge hardware configuration */ if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) { device_printf(sc->dev, "unable to identify device, " " using generic bridge resource definitions\n"); hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, dev); hw = NULL; } else { hwcfg = hw->cfg; } if (hw != NULL && (bootverbose || BHNDB_DEBUG(PRIO))) { device_printf(sc->dev, "%s resource configuration\n", hw->name); } /* Allocate bridge resource state using the discovered hardware * configuration */ sc->bus_res = bhndb_alloc_resources(sc->dev, sc->parent_dev, hwcfg); if (sc->bus_res == NULL) { device_printf(sc->dev, "failed to allocate bridge resource " "state\n"); error = ENOMEM; goto failed; } /* Add our bridged bus device */ sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", -1); if (sc->bus_dev == NULL) { error = ENXIO; goto failed; } dinfo = device_get_ivars(sc->bus_dev); dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED; /* We can now use bhndb to perform bridging of SYS_RES_MEMORY resources; * we use this to instantiate an erom parser instance */ eio = bhnd_erom_iores_new(sc->bus_dev, 0); if ((erom = bhnd_erom_alloc(erom_class, cid, eio)) == NULL) { bhnd_erom_io_fini(eio); error = ENXIO; goto failed; } /* Populate our resource priority configuration */ hwprio = BHNDB_BUS_GET_HARDWARE_PRIO(sc->parent_dev, sc->dev); error = bhndb_init_region_cfg(sc, erom, sc->bus_res, cores, ncores, hwprio); if (error) { device_printf(sc->dev, "failed to initialize resource " "priority configuration: %d\n", error); goto failed; } /* Free our erom instance */ bhnd_erom_free(erom); erom = NULL; return (0); failed: BHNDB_LOCK_DESTROY(sc); if (sc->bus_res != NULL) bhndb_free_resources(sc->bus_res); if (erom != NULL) bhnd_erom_free(erom); bhnd_service_registry_fini(&sc->services); return (error); } /** * Default bhndb(4) implementation of DEVICE_DETACH(). * * This function detaches any child devices, and if successful, releases all * resources held by the bridge device. */ int bhndb_generic_detach(device_t dev) { struct bhndb_softc *sc; int error; sc = device_get_softc(dev); /* Detach children */ if ((error = bus_generic_detach(dev))) return (error); /* Delete children */ if ((error = device_delete_children(dev))) return (error); /* Clean up our service registry */ if ((error = bhnd_service_registry_fini(&sc->services))) return (error); /* Clean up our driver state. */ bhndb_free_resources(sc->bus_res); BHNDB_LOCK_DESTROY(sc); return (0); } /** * Default bhndb(4) implementation of DEVICE_SUSPEND(). * * This function calls bus_generic_suspend() (or implements equivalent * behavior). */ int bhndb_generic_suspend(device_t dev) { return (bus_generic_suspend(dev)); } /** * Default bhndb(4) implementation of DEVICE_RESUME(). 
* * This function calls bus_generic_resume() (or implements equivalent * behavior). */ int bhndb_generic_resume(device_t dev) { struct bhndb_softc *sc; struct bhndb_resources *bus_res; struct bhndb_dw_alloc *dwa; int error; sc = device_get_softc(dev); bus_res = sc->bus_res; /* Guarantee that all in-use dynamic register windows are mapped to * their previously configured target address. */ BHNDB_LOCK(sc); error = 0; for (size_t i = 0; i < bus_res->dwa_count; i++) { dwa = &bus_res->dw_alloc[i]; /* Skip regions that were not previously used */ if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0) continue; /* Otherwise, ensure the register window is correct before * any children attempt MMIO */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) break; } BHNDB_UNLOCK(sc); /* Error restoring hardware state; children cannot be safely resumed */ if (error) { device_printf(dev, "Unable to restore hardware configuration; " "cannot resume: %d\n", error); return (error); } return (bus_generic_resume(dev)); } /** * Default implementation of BHNDB_SUSPEND_RESOURCE. */ static void bhndb_suspend_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; struct bhndb_dw_alloc *dwa; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return; BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa == NULL) { BHNDB_UNLOCK(sc); return; } if (BHNDB_DEBUG(PRIO)) device_printf(child, "suspend resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); /* Release the resource's window reference */ bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } /** * Default implementation of BHNDB_RESUME_RESOURCE. */ static int bhndb_resume_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return (0); /* Inactive resources don't require reallocation of bridge resources */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (0); if (BHNDB_DEBUG(PRIO)) device_printf(child, "resume resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); return (bhndb_try_activate_resource(sc, rman_get_device(r), type, rman_get_rid(r), r, NULL)); } /** * Default bhndb(4) implementation of BUS_READ_IVAR(). */ static int bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } /** * Default bhndb(4) implementation of BUS_WRITE_IVAR(). */ static int bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * Return the address space for the given @p child device. */ bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child) { struct bhndb_devinfo *dinfo; device_t imd_dev; /* Find the directly attached parent of the requesting device */ imd_dev = child; while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev) imd_dev = device_get_parent(imd_dev); if (imd_dev == NULL) panic("bhndb address space request for non-child device %s\n", device_get_nameunit(child)); dinfo = device_get_ivars(imd_dev); return (dinfo->addrspace); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The bhndb device state. * @param child The requesting child. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) 
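 *
 * A NULL return indicates that the type is not managed locally and
 * that the request should be delegated to the parent bus; this is
 * how the callers below interpret it.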
*/ static struct rman * bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) { switch (bhndb_get_addrspace(sc, child)) { case BHNDB_ADDRSPACE_NATIVE: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->ht_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } case BHNDB_ADDRSPACE_BRIDGED: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->br_mem_rman); case SYS_RES_IRQ: return (&sc->bus_res->br_irq_rman); default: return (NULL); } } /* Quieten gcc */ return (NULL); } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_CORE_DISABLED(). */ static bool bhndb_is_core_disabled(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(core))) return (!bhnd_cores_equal(core, &sc->bridge_core)); /* Assume the core is populated */ return (false); } /** * Default bhndb(4) implementation of BHNDB_GET_HOSTB_CORE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices. */ static int bhndb_get_hostb_core(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc = device_get_softc(dev); *core = sc->bridge_core; return (0); } /** * Default bhndb(4) implementation of BHND_BUS_GET_SERVICE_REGISTRY(). */ static struct bhnd_service_registry * bhndb_get_service_registry(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->services); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { /* Delegate to our parent device's bus; the requested * resource type isn't handled locally. 
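 * (Note the double hop: the method is invoked on the parent of
 * sc->parent_dev, handing the request to the bus that hosts the
 * bridge's own parent device.)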
*/ return (BUS_ALLOC_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, start, end, count, flags)); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || count > ((end - start) + 1)) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); rman_set_type(rv, type); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int bhndb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_RELEASE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; } return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). */ static int -bhndb_adjust_resource(device_t dev, device_t child, int type, +bhndb_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct bhndb_softc *sc; struct rman *rm; rman_res_t mstart, mend; int error; sc = device_get_softc(dev); error = 0; /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. 
*/ - rm = bhndb_get_rman(sc, child, type); + rm = bhndb_get_rman(sc, child, rman_get_type(r)); if (rm == NULL) { return (BUS_ADJUST_RESOURCE(device_get_parent(sc->parent_dev), - child, type, r, start, end)); + child, r, start, end)); } /* Verify basic constraints */ if (end <= start) return (EINVAL); if (!rman_is_region_manager(r, rm)) return (ENXIO); BHNDB_LOCK(sc); /* If not active, allow any range permitted by the resource manager */ if (!(rman_get_flags(r) & RF_ACTIVE)) goto done; /* Otherwise, the range is limited by the bridged resource mapping */ error = bhndb_find_resource_limits(sc->bus_res, r, &mstart, &mend); if (error) goto done; if (start < mstart || end > mend) { error = EINVAL; goto done; } /* Fall through */ done: if (!error) error = rman_adjust_resource(r, start, end); BHNDB_UNLOCK(sc); return (error); } /** * Initialize child resource @p r with a virtual address, tag, and handle * copied from @p parent, adjusted to contain only the range defined by * @p offset and @p size. * * @param r The resource to be initialized. * @param parent The parent bus resource that fully contains the subregion. * @param offset The subregion offset within @p parent. * @param size The subregion size. */ static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size) { bus_space_handle_t bh, child_bh; bus_space_tag_t bt; uintptr_t vaddr; int error; /* Fetch the parent resource's real bus values */ vaddr = (uintptr_t) rman_get_virtual(parent); bt = rman_get_bustag(parent); bh = rman_get_bushandle(parent); /* Configure child resource with window-adjusted real bus values */ vaddr += offset; error = bus_space_subregion(bt, bh, offset, size, &child_bh); if (error) return (error); rman_set_virtual(r, (void *) vaddr); rman_set_bustag(r, bt); rman_set_bushandle(r, child_bh); return (0); } /** * Attempt activation of a fixed register window mapping for @p child. * * @param sc BHNDB device state. * @param region The static region definition capable of mapping @p r. * @param child A child requesting resource activation. * @param type Resource type. * @param rid Resource identifier. * @param r Resource to be activated. * * @retval 0 if @p r was activated successfully * @retval ENOENT if no fixed register window was found. * @retval non-zero if @p r could not be activated. */ static int bhndb_activate_static_region(struct bhndb_softc *sc, struct bhndb_region *region, device_t child, int type, int rid, struct resource *r) { struct resource *bridge_res; const struct bhndb_regwin *win; bhnd_size_t parent_offset; rman_res_t r_start, r_size; int error; win = region->static_regwin; KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type), ("can't activate non-static region")); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Find the corresponding bridge resource */ bridge_res = bhndb_host_resource_for_regwin(sc->bus_res->res, win); if (bridge_res == NULL) return (ENXIO); /* Calculate subregion offset within the parent resource */ parent_offset = r_start - region->addr; parent_offset += win->win_offset; /* Configure resource with its real bus values. */ error = bhndb_init_child_resource(r, bridge_res, parent_offset, r_size); if (error) return (error); /* Mark active */ if ((error = rman_activate_resource(r))) return (error); return (0); } /** * Attempt to allocate/retain a dynamic register window for @p r, returning * the retained window. * * @param sc The bhndb driver state. 
* @param r The resource for which a window will be retained. */ static struct bhndb_dw_alloc * bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r) { struct bhndb_dw_alloc *dwa; rman_res_t r_start, r_size; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Look for an existing dynamic window we can reference */ dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size); if (dwa != NULL) { if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0) return (dwa); return (NULL); } /* Otherwise, try to reserve a free window */ dwa = bhndb_dw_next_free(sc->bus_res); if (dwa == NULL) { /* No free windows */ return (NULL); } /* Window must be large enough to map the entire resource */ if (dwa->win->win_size < rman_get_size(r)) return (NULL); /* Set the window target */ error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, rman_get_start(r), rman_get_size(r)); if (error) { device_printf(sc->dev, "dynamic window initialization " "for 0x%llx-0x%llx failed: %d\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, error); return (NULL); } /* Add our reservation */ if (bhndb_dw_retain(sc->bus_res, dwa, r)) return (NULL); return (dwa); } /** * Activate a resource using any viable static or dynamic register window. * * @param sc The bhndb driver state. * @param child The child holding ownership of @p r. * @param type The type of the resource to be activated. * @param rid The resource ID of @p r. * @param r The resource to be activated * @param[out] indirect On error and if not NULL, will be set to 'true' if * the caller should instead use an indirect resource mapping. * * @retval 0 success * @retval non-zero activation failed. */ static int bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, int type, int rid, struct resource *r, bool *indirect) { struct bhndb_region *region; struct bhndb_dw_alloc *dwa; bhndb_priority_t dw_priority; rman_res_t r_start, r_size; rman_res_t parent_offset; int error; BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED); if (indirect != NULL) *indirect = false; switch (type) { case SYS_RES_IRQ: /* IRQ resources are always directly mapped */ return (rman_activate_resource(r)); case SYS_RES_MEMORY: /* Handled below */ break; default: device_printf(sc->dev, "unsupported resource type %d\n", type); return (ENXIO); } /* Only MMIO resources can be mapped via register windows */ KASSERT(type == SYS_RES_MEMORY, ("invalid type: %d", type)); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Activate native addrspace resources using the host address space */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) { struct resource *parent; /* Find the bridge resource referenced by the child */ parent = bhndb_host_resource_for_range(sc->bus_res->res, type, r_start, r_size); if (parent == NULL) { device_printf(sc->dev, "host resource not found " "for 0x%llx-0x%llx\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (ENOENT); } /* Initialize child resource with the real bus values */ error = bhndb_init_child_resource(r, parent, r_start - rman_get_start(parent), r_size); if (error) return (error); /* Try to activate child resource */ return (rman_activate_resource(r)); } /* Default to low priority */ dw_priority = BHNDB_PRIORITY_LOW; /* Look for a bus region matching the resource's address range */ region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) dw_priority = region->priority; /* Prefer static mappings over consuming a dynamic 
window. */ if (region && region->static_regwin) { error = bhndb_activate_static_region(sc, region, child, type, rid, r); if (error) device_printf(sc->dev, "static window allocation " "for 0x%llx-0x%llx failed\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (error); } /* A dynamic window will be required; is this resource high enough * priority to be reserved a dynamic window? */ if (dw_priority < sc->bus_res->min_prio) { if (indirect) *indirect = true; return (ENOMEM); } /* Find and retain a usable window */ BHNDB_LOCK(sc); { dwa = bhndb_retain_dynamic_window(sc, r); } BHNDB_UNLOCK(sc); if (dwa == NULL) { if (indirect) *indirect = true; return (ENOMEM); } /* Configure resource with its real bus values. */ parent_offset = dwa->win->win_offset; parent_offset += r_start - dwa->target; error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset, dwa->win->win_size); if (error) goto failed; /* Mark active */ if ((error = rman_activate_resource(r))) goto failed; return (0); failed: /* Release our region allocation. */ BHNDB_LOCK(sc); bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); return (error); } /** * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE(). */ static int bhndb_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_ACTIVATE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } return (bhndb_try_activate_resource(sc, child, type, rid, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; int error; sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_DEACTIVATE_RESOURCE( device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); switch (type) { case SYS_RES_IRQ: /* No bridge-level state to be freed */ return (0); case SYS_RES_MEMORY: /* Free any dynamic window allocation. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa != NULL) bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } return (0); default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST(). */ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_NATIVE children, all resources are activated as direct * resources via BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_BRIDGED children, the resource priority is determined, * and if possible, the resource is activated as a direct resource.
For example, * depending on resource priority and bridge resource availability, this * function will attempt to activate SYS_RES_MEMORY resources using either a * static register window, a dynamic register window, or it will configure @p r * as an indirect resource -- in that order. */ static int bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct bhndb_softc *sc; struct bhndb_region *region; bhndb_priority_t r_prio; rman_res_t r_start, r_size; int error; bool indirect; KASSERT(!r->direct, ("direct flag set on inactive resource")); KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE), ("RF_ACTIVE set on inactive resource")); sc = device_get_softc(dev); /* Delegate directly to BUS_ACTIVATE_RESOURCE() if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { error = BUS_ACTIVATE_RESOURCE(dev, child, type, rid, r->res); if (error == 0) r->direct = true; return (error); } r_start = rman_get_start(r->res); r_size = rman_get_size(r->res); /* Determine the resource priority of bridged resources, and skip direct * allocation if the priority is too low. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { switch (type) { case SYS_RES_IRQ: /* IRQ resources are always direct */ break; case SYS_RES_MEMORY: region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) r_prio = region->priority; else r_prio = BHNDB_PRIORITY_NONE; /* If less than the minimum dynamic window priority, * this resource should always be indirect. */ if (r_prio < sc->bus_res->min_prio) return (0); break; default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /* Attempt direct activation */ error = bhndb_try_activate_resource(sc, child, type, rid, r->res, &indirect); if (!error) { r->direct = true; } else if (indirect) { /* The request was valid, but no viable register window is * available; indirection must be employed. */ error = 0; r->direct = false; } if (BHNDB_DEBUG(PRIO) && bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { device_printf(child, "activated 0x%llx-0x%llx as %s " "resource\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, r->direct ? "direct" : "indirect"); } return (error); } /** * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Indirect resources don't require activation */ if (!r->direct) return (0); KASSERT(rman_get_flags(r->res) & RF_ACTIVE, ("RF_ACTIVE not set on direct resource")); /* Perform deactivation */ error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r->res); if (!error) r->direct = false; return (error); } /** * Find the best available bridge resource allocation record capable of handling * bus I/O requests of @p size at @p addr. * * In order of preference, this function will either: * * - Configure and return a free allocation record * - Return an existing allocation record mapping the requested space, or * - Steal, configure, and return an in-use allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. 
* @param[out] borrowed Set to true if the allocation record was borrowed to * fulfill this request; the borrowed record maps the target address range, * and must not be modified. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static struct bhndb_dw_alloc * bhndb_io_resource_get_window(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bool *borrowed, bool *stolen, bus_addr_t *restore) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; struct bhndb_region *region; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; *borrowed = false; *stolen = false; /* Try to fetch a free window */ if ((dwa = bhndb_dw_next_free(br)) != NULL) return (dwa); /* Search for an existing dynamic mapping of this address range. * Static regions are not searched, as a statically mapped * region would never be allocated as an indirect resource. */ for (size_t i = 0; i < br->dwa_count; i++) { const struct bhndb_regwin *win; dwa = &br->dw_alloc[i]; win = dwa->win; KASSERT(win->win_type == BHNDB_REGWIN_T_DYN, ("invalid register window type")); /* Verify the range */ if (addr < dwa->target) continue; if (addr + size > dwa->target + win->win_size) continue; /* Found */ *borrowed = true; return (dwa); } /* Try to steal a window; this should only be required on very early * PCI_V0 (BCM4318, etc) Wi-Fi chipsets */ region = bhndb_find_resource_region(br, addr, size); if (region == NULL) return (NULL); if ((region->alloc_flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) == 0) return (NULL); /* Steal a window. This acquires our backing spinlock, disabling * interrupts; the spinlock will be released by * bhndb_dw_return_stolen() */ if ((dwa = bhndb_dw_steal(br, restore)) != NULL) { *stolen = true; return (dwa); } panic("register windows exhausted attempting to map 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } /** * Return a borrowed reference to a bridge resource allocation record capable * of handling bus I/O requests of @p size at @p addr. * * This will either return a reference to an existing allocation record mapping * the requested space, or will configure and return a free allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore) { struct bhndb_dw_alloc *dwa; bool borrowed; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); dwa = bhndb_io_resource_get_window(sc, addr, size, &borrowed, stolen, restore); /* Adjust the window if the I/O request won't fit in the current * target range. 
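* For example (hypothetical values, not taken from a real window layout): * with dwa->target = 0x18000000 and win_size = 0x1000, a 4-byte request at * addr = 0x18000ffe passes the first two checks below but fails the third, * since (target + win_size) - addr == 2 < 4, and the window must be * retargeted via bhndb_dw_set_addr().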
*/ if (addr < dwa->target || addr > dwa->target + dwa->win->win_size || (dwa->target + dwa->win->win_size) - addr < size) { /* Cannot modify target of borrowed windows */ if (borrowed) { panic("borrowed register window does not map expected " "range 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE)_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ bus_addr_t restore; \ bool stolen; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, rman_get_start(r->res) + \ offset, _io_size, &io_offset, &stolen, &restore); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ if (stolen) { \ bhndb_dw_return_stolen(sc->dev, sc->bus_res, \ dwa, restore); \ } \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _name) \ static _type \ bhndb_bus_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _name (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _name) \ static void \ bhndb_bus_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _name (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */ #define BHNDB_IO_MISC(_type, _ptr, _op, _size) \ static void \ bhndb_bus_ ## _op ## _ ## _size (device_t dev, \ device_t child, struct bhnd_resource *r, bus_size_t offset, \ _type _ptr datap, bus_size_t count) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \ bus_ ## _op ## _ ## _size (io_res, io_offset, \ datap, count); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a complete set of read/write methods */ #define BHNDB_IO_METHODS(_type, _size) \ BHNDB_IO_READ(_type, _size) \ BHNDB_IO_WRITE(_type, _size) \ \ BHNDB_IO_READ(_type, stream_ ## _size) \ BHNDB_IO_WRITE(_type, stream_ ## _size) \ \ BHNDB_IO_MISC(_type, *, read_multi, _size) \ BHNDB_IO_MISC(_type, *, write_multi, _size) \ \ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \ \ BHNDB_IO_MISC(_type, , set_multi, _size) \ BHNDB_IO_MISC(_type, , set_region, _size) \ BHNDB_IO_MISC(_type, *, read_region, _size) \ BHNDB_IO_MISC(_type, *, write_region, _size) \ \ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \ BHNDB_IO_MISC(_type, *, write_region_stream, _size) BHNDB_IO_METHODS(uint8_t, 1); BHNDB_IO_METHODS(uint16_t, 2); BHNDB_IO_METHODS(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER().
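* * The barrier is issued against the host resource that backs the register * window mapping @p r; the io_offset produced by BHNDB_IO_COMMON_SETUP() * already accounts for the caller-supplied offset.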
*/ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHNDB_IO_COMMON_SETUP(length); bus_barrier(io_res, io_offset, length, flags); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BHND_MAP_INTR(). */ static int bhndb_bhnd_map_intr(device_t dev, device_t child, u_int intr, rman_res_t *irq) { u_int ivec; int error; /* Is the intr valid? */ if (intr >= bhnd_get_intr_count(child)) return (EINVAL); /* Fetch the interrupt vector */ if ((error = bhnd_get_intr_ivec(child, intr, &ivec))) return (error); /* Map directly to the actual backplane interrupt vector */ *irq = ivec; return (0); } /** * Default bhndb(4) implementation of BHND_UNMAP_INTR(). */ static void bhndb_bhnd_unmap_intr(device_t dev, device_t child, rman_res_t irq) { /* No state to clean up */ } /** * Default bhndb(4) implementation of BUS_SETUP_INTR(). */ static int bhndb_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { struct bhndb_softc *sc; struct bhndb_intr_isrc *isrc; struct bhndb_intr_handler *ih; int error; sc = device_get_softc(dev); /* Fetch the isrc */ if ((error = BHNDB_MAP_INTR_ISRC(dev, r, &isrc))) { device_printf(dev, "failed to fetch isrc: %d\n", error); return (error); } /* Allocate new ihandler entry */ ih = bhndb_alloc_intr_handler(child, r, isrc); if (ih == NULL) return (ENOMEM); /* Perform actual interrupt setup via the host isrc */ error = bus_setup_intr(isrc->is_owner, isrc->is_res, flags, filter, handler, arg, &ih->ih_cookiep); if (error) { bhndb_free_intr_handler(ih); return (error); } /* Add to our interrupt handler list */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Provide the interrupt handler entry as our cookiep value */ *cookiep = ih; return (0); } /** * Default bhndb(4) implementation of BUS_TEARDOWN_INTR(). */ static int bhndb_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookiep) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; int error; sc = device_get_softc(dev); /* Locate and claim ownership of the interrupt handler entry */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookiep); if (ih == NULL) { panic("%s requested teardown of invalid cookiep %p", device_get_nameunit(child), cookiep); } bhndb_deregister_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Perform actual interrupt teardown via the host isrc */ isrc = ih->ih_isrc; error = bus_teardown_intr(isrc->is_owner, isrc->is_res, ih->ih_cookiep); if (error) { /* If teardown fails, we need to reinsert the handler entry * to allow later teardown */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); return (error); } /* Free the entry */ bhndb_free_intr_handler(ih); return (0); } /** * Default bhndb(4) implementation of BUS_BIND_INTR().
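* * The child's IRQ resource is translated to the host interrupt source * (isrc) recorded at interrupt setup time, and the bind request is * forwarded to the isrc's owner.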
*/ static int bhndb_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); isrc = NULL; /* Fetch the isrc corresponding to the child IRQ resource */ BHNDB_LOCK(sc); STAILQ_FOREACH(ih, &sc->bus_res->bus_intrs, ih_link) { if (ih->ih_res == irq) { isrc = ih->ih_isrc; break; } } BHNDB_UNLOCK(sc); if (isrc == NULL) { panic("%s requested bind of invalid irq %#jx-%#jx", device_get_nameunit(child), rman_get_start(irq), rman_get_end(irq)); } /* Perform actual bind via the host isrc */ return (bus_bind_intr(isrc->is_owner, isrc->is_res, cpu)); } /** * Default bhndb(4) implementation of BUS_DESCRIBE_INTR(). */ static int bhndb_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); /* Locate the interrupt handler entry; the caller owns the handler * reference, and thus our entry is guaranteed to remain valid after * we drop our lock below. */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookie); if (ih == NULL) { panic("%s requested invalid cookiep %p", device_get_nameunit(child), cookie); } isrc = ih->ih_isrc; BHNDB_UNLOCK(sc); /* Perform the actual request via the host isrc */ return (BUS_DESCRIBE_INTR(device_get_parent(isrc->is_owner), isrc->is_owner, isrc->is_res, ih->ih_cookiep, descr)); } /** * Default bhndb(4) implementation of BUS_CONFIG_INTR(). */ static int bhndb_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BUS_REMAP_INTR(). */ static int bhndb_remap_intr(device_t dev, device_t child, u_int irq) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BHND_BUS_GET_DMA_TRANSLATION(). */ static inline int bhndb_get_dma_translation(device_t dev, device_t child, u_int width, uint32_t flags, bus_dma_tag_t *dmat, struct bhnd_dma_translation *translation) { struct bhndb_softc *sc; const struct bhndb_hwcfg *hwcfg; const struct bhnd_dma_translation *match; bus_dma_tag_t match_dmat; bhnd_addr_t addr_mask, match_addr_mask; sc = device_get_softc(dev); hwcfg = sc->bus_res->cfg; /* Is DMA supported? */ if (sc->bus_res->res->dma_tags == NULL) return (ENODEV); /* Is the requested width supported? */ if (width > BHND_DMA_ADDR_32BIT) { /* Backplane must support 64-bit addressing */ if (!(sc->chipid.chip_caps & BHND_CAP_BP64)) width = BHND_DMA_ADDR_32BIT; } /* Find the best matching descriptor for the requested width */ addr_mask = BHND_DMA_ADDR_BITMASK(width); match = NULL; match_addr_mask = 0x0; match_dmat = NULL; for (size_t i = 0; i < sc->bus_res->res->num_dma_tags; i++) { const struct bhnd_dma_translation *dwin; bhnd_addr_t masked; dwin = &hwcfg->dma_translations[i]; /* The base address must be device addressable */ if ((dwin->base_addr & addr_mask) != dwin->base_addr) continue; /* The flags must match */ if ((dwin->flags & flags) != flags) continue; /* The window must cover at least part of our addressable * range */ masked = (dwin->addr_mask | dwin->addrext_mask) & addr_mask; if (masked == 0) continue; /* Is this a better match?
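* A candidate is better when it exposes more of the device-addressable * range than the current best match, i.e. when masked > match_addr_mask.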
*/ if (match == NULL || masked > match_addr_mask) { match = dwin; match_addr_mask = masked; match_dmat = sc->bus_res->res->dma_tags[i]; } } if (match == NULL || match_addr_mask == 0) return (ENOENT); if (dmat != NULL) *dmat = match_dmat; if (translation != NULL) *translation = *match; return (0); } /** * Default bhndb(4) implementation of BUS_GET_DMA_TAG(). */ static bus_dma_tag_t bhndb_get_dma_tag(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); /* * A bridge may have multiple DMA translation descriptors, each with * their own incompatible restrictions; drivers should in general call * BHND_BUS_GET_DMA_TRANSLATION() to fetch both the best available DMA * translation, and its corresponding DMA tag. * * Child drivers that do not use BHND_BUS_GET_DMA_TRANSLATION() are * responsible for creating their own restricted DMA tag; since we * cannot do this for them in BUS_GET_DMA_TAG(), we simply return the * bridge parent's DMA tag directly. */ return (bus_get_dma_tag(sc->parent_dev)); } static device_method_t bhndb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bhndb_generic_probe), DEVMETHOD(device_detach, bhndb_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bhndb_generic_suspend), DEVMETHOD(device_resume, bhndb_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhndb_probe_nomatch), DEVMETHOD(bus_print_child, bhndb_print_child), DEVMETHOD(bus_child_location, bhndb_child_location), DEVMETHOD(bus_add_child, bhndb_add_child), DEVMETHOD(bus_child_deleted, bhndb_child_deleted), DEVMETHOD(bus_alloc_resource, bhndb_alloc_resource), DEVMETHOD(bus_release_resource, bhndb_release_resource), DEVMETHOD(bus_activate_resource, bhndb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhndb_deactivate_resource), DEVMETHOD(bus_setup_intr, bhndb_setup_intr), DEVMETHOD(bus_teardown_intr, bhndb_teardown_intr), DEVMETHOD(bus_config_intr, bhndb_config_intr), DEVMETHOD(bus_bind_intr, bhndb_bind_intr), DEVMETHOD(bus_describe_intr, bhndb_describe_intr), DEVMETHOD(bus_remap_intr, bhndb_remap_intr), DEVMETHOD(bus_get_dma_tag, bhndb_get_dma_tag), DEVMETHOD(bus_adjust_resource, bhndb_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource_list, bhndb_get_resource_list), DEVMETHOD(bus_read_ivar, bhndb_read_ivar), DEVMETHOD(bus_write_ivar, bhndb_write_ivar), /* BHNDB interface */ DEVMETHOD(bhndb_get_chipid, bhndb_get_chipid), DEVMETHOD(bhndb_is_core_disabled, bhndb_is_core_disabled), DEVMETHOD(bhndb_get_hostb_core, bhndb_get_hostb_core), DEVMETHOD(bhndb_suspend_resource, bhndb_suspend_resource), DEVMETHOD(bhndb_resume_resource, bhndb_resume_resource), /* BHND interface */ DEVMETHOD(bhnd_bus_get_chipid, bhndb_get_chipid), DEVMETHOD(bhnd_bus_activate_resource, bhndb_activate_bhnd_resource), DEVMETHOD(bhnd_bus_deactivate_resource, bhndb_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var), DEVMETHOD(bhnd_bus_map_intr, bhndb_bhnd_map_intr), DEVMETHOD(bhnd_bus_unmap_intr, bhndb_bhnd_unmap_intr), DEVMETHOD(bhnd_bus_get_dma_translation, bhndb_get_dma_translation), DEVMETHOD(bhnd_bus_get_service_registry,bhndb_get_service_registry), DEVMETHOD(bhnd_bus_register_provider, bhnd_bus_generic_sr_register_provider), DEVMETHOD(bhnd_bus_deregister_provider, bhnd_bus_generic_sr_deregister_provider),
DEVMETHOD(bhnd_bus_retain_provider, bhnd_bus_generic_sr_retain_provider), DEVMETHOD(bhnd_bus_release_provider, bhnd_bus_generic_sr_release_provider), DEVMETHOD(bhnd_bus_read_1, bhndb_bus_read_1), DEVMETHOD(bhnd_bus_read_2, bhndb_bus_read_2), DEVMETHOD(bhnd_bus_read_4, bhndb_bus_read_4), DEVMETHOD(bhnd_bus_write_1, bhndb_bus_write_1), DEVMETHOD(bhnd_bus_write_2, bhndb_bus_write_2), DEVMETHOD(bhnd_bus_write_4, bhndb_bus_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhndb_bus_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhndb_bus_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhndb_bus_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhndb_bus_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhndb_bus_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhndb_bus_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhndb_bus_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhndb_bus_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhndb_bus_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhndb_bus_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhndb_bus_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhndb_bus_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, bhndb_bus_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhndb_bus_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhndb_bus_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhndb_bus_write_multi_stream_1), DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2), DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier), DEVMETHOD_END }; DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc)); MODULE_VERSION(bhndb, 1); MODULE_DEPEND(bhndb, bhnd, 1, 1, 1); diff --git a/sys/dev/bhnd/cores/chipc/chipc.c b/sys/dev/bhnd/cores/chipc/chipc.c index f99f42fb3ac3..2d1440e5c987 100644 --- a/sys/dev/bhnd/cores/chipc/chipc.c +++ b/sys/dev/bhnd/cores/chipc/chipc.c @@ -1,1399 +1,1398 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Broadcom ChipCommon driver. * * With the exception of some very early chipsets, the ChipCommon core * has been included in all HND SoCs and chipsets based on the siba(4) * and bcma(4) interconnects, providing a common interface to chipset * identification, bus enumeration, UARTs, clocks, watchdog interrupts, * GPIO, flash, etc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "chipcreg.h" #include "chipcvar.h" #include "chipc_private.h" static struct bhnd_device_quirk chipc_quirks[]; /* Supported device identifiers */ static const struct bhnd_device chipc_devices[] = { BHND_DEVICE(BCM, CC, NULL, chipc_quirks), BHND_DEVICE(BCM, 4706_CC, NULL, chipc_quirks), BHND_DEVICE_END }; /* Device quirks table */ static struct bhnd_device_quirk chipc_quirks[] = { /* HND OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (12), CHIPC_QUIRK_OTP_HND), /* (?) */ BHND_CORE_QUIRK (HWREV_EQ (17), CHIPC_QUIRK_OTP_HND), /* BCM4311 */ BHND_CORE_QUIRK (HWREV_EQ (22), CHIPC_QUIRK_OTP_HND), /* BCM4312 */ /* IPX OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (21), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(23), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(32), CHIPC_QUIRK_SUPPORTS_SPROM), BHND_CORE_QUIRK (HWREV_GTE(35), CHIPC_QUIRK_SUPPORTS_CAP_EXT), BHND_CORE_QUIRK (HWREV_GTE(49), CHIPC_QUIRK_IPX_OTPL_SIZE), /* 4706 variant quirks */ BHND_CORE_QUIRK (HWREV_EQ (38), CHIPC_QUIRK_4706_NFLASH), /* BCM5357? 
*/ BHND_CHIP_QUIRK (4706, HWREV_ANY, CHIPC_QUIRK_4706_NFLASH), /* 4331 quirks */ BHND_CHIP_QUIRK (4331, HWREV_ANY, CHIPC_QUIRK_4331_EXTPA_MUX_SPROM), BHND_PKG_QUIRK (4331, TN, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TNA0, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TT, CHIPC_QUIRK_4331_EXTPA2_MUX_SPROM), /* 4360 quirks */ BHND_CHIP_QUIRK (4352, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43460, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43462, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43602, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_DEVICE_QUIRK_END }; static int chipc_add_children(struct chipc_softc *sc); static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps); static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps); static bool chipc_should_enable_muxed_sprom( struct chipc_softc *sc); static int chipc_enable_otp_power(struct chipc_softc *sc); static void chipc_disable_otp_power(struct chipc_softc *sc); static int chipc_enable_sprom_pins(struct chipc_softc *sc); static void chipc_disable_sprom_pins(struct chipc_softc *sc); static int chipc_try_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r, bool req_direct); static int chipc_init_rman(struct chipc_softc *sc); static void chipc_free_rman(struct chipc_softc *sc); static struct rman *chipc_get_rman(device_t dev, int type, u_int flags); /* quirk and capability flag convenience macros */ #define CHIPC_QUIRK(_sc, _name) \ ((_sc)->quirks & CHIPC_QUIRK_ ## _name) #define CHIPC_CAP(_sc, _name) \ ((_sc)->caps._name) #define CHIPC_ASSERT_QUIRK(_sc, name) \ KASSERT(CHIPC_QUIRK((_sc), name), ("quirk " __STRING(name) " not set")) #define CHIPC_ASSERT_CAP(_sc, name) \ KASSERT(CHIPC_CAP((_sc), name), ("capability " __STRING(name) " not set")) static int chipc_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, chipc_devices, sizeof(chipc_devices[0])); if (id == NULL) return (ENXIO); bhnd_set_default_core_desc(dev); return (BUS_PROBE_DEFAULT); } static int chipc_attach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->quirks = bhnd_device_quirks(dev, chipc_devices, sizeof(chipc_devices[0])); sc->sprom_refcnt = 0; CHIPC_LOCK_INIT(sc); STAILQ_INIT(&sc->mem_regions); /* Set up resource management */ if ((error = chipc_init_rman(sc))) { device_printf(sc->dev, "failed to initialize chipc resource state: %d\n", error); goto failed; } /* Allocate the region containing the chipc register block */ if ((sc->core_region = chipc_find_region_by_rid(sc, 0)) == NULL) { error = ENXIO; goto failed; } error = chipc_retain_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); if (error) { sc->core_region = NULL; goto failed; } /* Save a direct reference to our chipc registers */ sc->core = sc->core_region->cr_res; /* Fetch and parse capability register(s) */ if ((error = chipc_read_caps(sc, &sc->caps))) goto failed; if (bootverbose) chipc_print_caps(sc->dev, &sc->caps); /* Attach all supported child devices */ if ((error = chipc_add_children(sc))) goto failed; /* * Register ourselves with the bus; we're fully initialized and can * respond to ChipCommon API requests. * * Since our children may need access to ChipCommon, this must be done * before attaching our children below (via bus_generic_attach).
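* * (bhnd_register_provider() publishes us as the BHND_SERVICE_CHIPC provider * in the bus service registry, allowing children to look us up during their * own attach.)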
*/ if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC))) goto failed; if ((error = bus_generic_attach(dev))) goto failed; return (0); failed: device_delete_children(sc->dev); if (sc->core_region != NULL) { chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); } chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (error); } static int chipc_detach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev))) return (error); if ((error = device_delete_children(dev))) return (error); if ((error = bhnd_deregister_provider(dev, BHND_SERVICE_ANY))) return (error); chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (0); } static int chipc_add_children(struct chipc_softc *sc) { device_t child; const char *flash_bus; int error; /* SPROM/OTP */ if (sc->caps.nvram_src == BHND_NVRAM_SRC_SPROM || sc->caps.nvram_src == BHND_NVRAM_SRC_OTP) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", -1); if (child == NULL) { device_printf(sc->dev, "failed to add nvram device\n"); return (ENXIO); } /* Both OTP and external SPROM are mapped at CHIPC_SPROM_OTP */ error = chipc_set_mem_resource(sc, child, 0, CHIPC_SPROM_OTP, CHIPC_SPROM_OTP_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set OTP memory " "resource: %d\n", error); return (error); } } /* * PMU/PWR_CTRL * * On AOB ("Always on Bus") devices, the PMU core (if it exists) is * attached directly to the bhnd(4) bus -- not chipc. */ if (sc->caps.pmu && !sc->caps.aob) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pmu\n"); return (ENXIO); } } else if (sc->caps.pwr_ctrl) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pwrctl\n"); return (ENXIO); } } /* GPIO */ child = BUS_ADD_CHILD(sc->dev, 0, "gpio", -1); if (child == NULL) { device_printf(sc->dev, "failed to add gpio\n"); return (ENXIO); } error = chipc_set_mem_resource(sc, child, 0, 0, RM_MAX_END, 0, 0); if (error) { device_printf(sc->dev, "failed to set gpio memory resource: " "%d\n", error); return (error); } /* All remaining devices are SoC-only */ if (bhnd_get_attach_type(sc->dev) != BHND_ATTACH_NATIVE) return (0); /* UARTs */ for (u_int i = 0; i < min(sc->caps.num_uarts, CHIPC_UART_MAX); i++) { int irq_rid, mem_rid; irq_rid = 0; mem_rid = 0; child = BUS_ADD_CHILD(sc->dev, 0, "uart", -1); if (child == NULL) { device_printf(sc->dev, "failed to add uart%u\n", i); return (ENXIO); } /* Shared IRQ */ error = chipc_set_irq_resource(sc, child, irq_rid, 0); if (error) { device_printf(sc->dev, "failed to set uart%u irq %u\n", i, 0); return (error); } /* UART registers are mapped sequentially */ error = chipc_set_mem_resource(sc, child, mem_rid, CHIPC_UART(i), CHIPC_UART_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set uart%u memory " "resource: %d\n", i, error); return (error); } } /* Flash */ flash_bus = chipc_flash_bus_name(sc->caps.flash_type); if (flash_bus != NULL) { int rid; child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, -1); if (child == NULL) { device_printf(sc->dev, "failed to add %s device\n", flash_bus); return (ENXIO); } /* flash memory mapping */ rid = 0; error = chipc_set_mem_resource(sc, child, rid, 0, RM_MAX_END, 1, 1); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } /* flashctrl registers */ rid++; error = chipc_set_mem_resource(sc, 
child, rid, CHIPC_SFLASH_BASE, CHIPC_SFLASH_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } } return (0); } /** * Determine the NVRAM data source for this device. * * The SPROM, OTP, and flash capability flags must be fully populated in * @p caps. * * @param sc chipc driver state. * @param caps capability flags to be used to derive NVRAM configuration. */ static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t otp_st, srom_ctrl; /* * We check for hardware presence in order of precedence. For example, * SPROM is always used in preference to internal OTP if found. */ if (CHIPC_QUIRK(sc, SUPPORTS_SPROM) && caps->sprom) { srom_ctrl = bhnd_bus_read_4(sc->core, CHIPC_SPROM_CTRL); if (srom_ctrl & CHIPC_SRC_PRESENT) return (BHND_NVRAM_SRC_SPROM); } /* Check for programmed OTP H/W subregion (contains SROM data) */ if (CHIPC_QUIRK(sc, SUPPORTS_OTP) && caps->otp_size > 0) { /* TODO: need access to HND-OTP device */ if (!CHIPC_QUIRK(sc, OTP_HND)) { device_printf(sc->dev, "NVRAM unavailable: unsupported OTP controller.\n"); return (BHND_NVRAM_SRC_UNKNOWN); } otp_st = bhnd_bus_read_4(sc->core, CHIPC_OTPST); if (otp_st & CHIPC_OTPS_GUP_HW) return (BHND_NVRAM_SRC_OTP); } /* Check for flash */ if (caps->flash_type != CHIPC_FLASH_NONE) return (BHND_NVRAM_SRC_FLASH); /* No NVRAM hardware capability declared */ return (BHND_NVRAM_SRC_UNKNOWN); } /* Read and parse chipc capabilities */ static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t cap_reg; uint32_t cap_ext_reg; uint32_t regval; /* Fetch cap registers */ cap_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES); cap_ext_reg = 0; if (CHIPC_QUIRK(sc, SUPPORTS_CAP_EXT)) cap_ext_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES_EXT); /* Extract values */ caps->num_uarts = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_NUM_UART); caps->mipseb = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_MIPSEB); caps->uart_gpio = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_UARTGPIO); caps->uart_clock = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_UCLKSEL); caps->extbus_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_EXTBUS); caps->pwr_ctrl = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PWR_CTL); caps->jtag_master = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_JTAGP); caps->pll_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_PLL); caps->backplane_64 = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_BKPLN64); caps->boot_rom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ROM); caps->pmu = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PMU); caps->eci = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ECI); caps->sprom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_SPROM); caps->otp_size = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_OTP_SIZE); caps->seci = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_SECI); caps->gsio = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_GSIO); caps->aob = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_AOB); /* Fetch OTP size for later IPX controller revisions */ if (CHIPC_QUIRK(sc, IPX_OTPL_SIZE)) { regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->otp_size = CHIPC_GET_BITS(regval, CHIPC_OTPL_SIZE); } /* Determine flash type and parameters */ caps->cfi_width = 0; switch (CHIPC_GET_BITS(cap_reg, CHIPC_CAP_FLASH)) { case CHIPC_CAP_SFLASH_ST: caps->flash_type = CHIPC_SFLASH_ST; break; case CHIPC_CAP_SFLASH_AT: caps->flash_type = CHIPC_SFLASH_AT; break; case CHIPC_CAP_NFLASH: /* unimplemented */ caps->flash_type = CHIPC_NFLASH; break; case CHIPC_CAP_PFLASH: caps->flash_type = CHIPC_PFLASH_CFI; /* determine cfi width */ regval = bhnd_bus_read_4(sc->core, 
CHIPC_FLASH_CFG); if (CHIPC_GET_FLAG(regval, CHIPC_FLASH_CFG_DS)) caps->cfi_width = 2; else caps->cfi_width = 1; break; case CHIPC_CAP_FLASH_NONE: caps->flash_type = CHIPC_FLASH_NONE; break; } /* Handle 4706_NFLASH fallback */ if (CHIPC_QUIRK(sc, 4706_NFLASH) && CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_4706_NFLASH)) { caps->flash_type = CHIPC_NFLASH_4706; } /* Determine NVRAM source. Must occur after the SPROM/OTP/flash * capability flags have been populated. */ caps->nvram_src = chipc_find_nvram_src(sc, caps); /* Determine the SPROM offset within OTP (if any). SPROM-formatted * data is placed within the OTP general use region. */ caps->sprom_offset = 0; if (caps->nvram_src == BHND_NVRAM_SRC_OTP) { CHIPC_ASSERT_QUIRK(sc, OTP_IPX); /* Bit offset to GUP HW subregion containing SPROM data */ regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->sprom_offset = CHIPC_GET_BITS(regval, CHIPC_OTPL_GUP); /* Convert to bytes */ caps->sprom_offset /= 8; } return (0); } static int chipc_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int chipc_resume(device_t dev) { return (bus_generic_resume(dev)); } static void chipc_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> at", name); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } printf(" (no driver attached)\n"); } static int chipc_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static device_t chipc_add_child(device_t dev, u_int order, const char *name, int unit) { struct chipc_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct chipc_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&dinfo->resources); dinfo->irq_mapped = false; device_set_ivars(child, dinfo); return (child); } static void chipc_child_deleted(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { /* Free the child's resource list */ resource_list_free(&dinfo->resources); /* Unmap the child's IRQ */ if (dinfo->irq_mapped) { bhnd_unmap_intr(dev, dinfo->irq); dinfo->irq_mapped = false; } free(dinfo, M_BHND); } device_set_ivars(child, NULL); } static struct resource_list * chipc_get_resource_list(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* Allocate region records for the given port, and add the port's memory * range to the mem_rman */ static int chipc_rman_init_regions (struct chipc_softc *sc, bhnd_port_type type, u_int port) { struct chipc_region *cr; rman_res_t start, end; u_int num_regions; int error; num_regions = bhnd_get_region_count(sc->dev, type, port); for (u_int region = 0; region < num_regions; region++) { /* Allocate new region record */ cr = chipc_alloc_region(sc, type, port, region); if (cr == NULL) return 
(ENODEV); /* Can't manage regions that cannot be allocated */ if (cr->cr_rid < 0) { BHND_DEBUG_DEV(sc->dev, "no rid for chipc region " "%s%u.%u", bhnd_port_type_name(type), port, region); chipc_free_region(sc, cr); continue; } /* Add to rman's managed range */ start = cr->cr_addr; end = cr->cr_end; if ((error = rman_manage_region(&sc->mem_rman, start, end))) { chipc_free_region(sc, cr); return (error); } /* Add to region list */ STAILQ_INSERT_TAIL(&sc->mem_regions, cr, cr_link); } return (0); } /* Initialize memory state for all chipc port regions */ static int chipc_init_rman(struct chipc_softc *sc) { u_int num_ports; int error; /* Port types for which we'll register chipc_region mappings */ bhnd_port_type types[] = { BHND_PORT_DEVICE }; /* Initialize resource manager */ sc->mem_rman.rm_start = 0; sc->mem_rman.rm_end = BUS_SPACE_MAXADDR; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "ChipCommon Device Memory"; if ((error = rman_init(&sc->mem_rman))) { device_printf(sc->dev, "could not initialize mem_rman: %d\n", error); return (error); } /* Populate per-port-region state */ for (u_int i = 0; i < nitems(types); i++) { num_ports = bhnd_get_port_count(sc->dev, types[i]); for (u_int port = 0; port < num_ports; port++) { error = chipc_rman_init_regions(sc, types[i], port); if (error) { device_printf(sc->dev, "region init failed for %s%u: %d\n", bhnd_port_type_name(types[i]), port, error); goto failed; } } } return (0); failed: chipc_free_rman(sc); return (error); } /* Free memory management state */ static void chipc_free_rman(struct chipc_softc *sc) { struct chipc_region *cr, *cr_next; STAILQ_FOREACH_SAFE(cr, &sc->mem_regions, cr_link, cr_next) chipc_free_region(sc, cr); rman_fini(&sc->mem_rman); } /** * Return the rman instance for a given resource @p type, if any. * * @param dev The chipc device. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) * @param flags Resource flags (e.g. RF_PREFETCHABLE) */ static struct rman * chipc_get_rman(device_t dev, int type, u_int flags) { struct chipc_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); case SYS_RES_IRQ: /* We delegate IRQ resource management to the parent bus */ return (NULL); default: return (NULL); } } static struct resource * chipc_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct chipc_softc *sc; struct chipc_region *cr; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager, delegate request if necessary */ rm = chipc_get_rman(dev, type, flags); if (rm == NULL) { /* Requested resource type is delegated to our parent */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry.
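* For a non-passthrough request with a default (wildcard) range, the * allocation's start, end, and count are inherited from the child's * resource list entry below.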
*/ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy " "[%d]\n", *rid, type, device_get_nameunit(child), rman_get_flags(rle->res)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Locate a mapping region */ if ((cr = chipc_find_region(sc, start, end)) == NULL) { /* Resource requests outside our shared port regions can be * delegated to our parent. */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* * As a special case, children that map the complete ChipCommon register * block are delegated to our parent. * * The rman API does not support sharing resources that are not * identical in size; since we allocate subregions to various children, * any children that need to map the entire register block (e.g. because * they require access to discontiguous register ranges) must make the * allocation through our parent, where we hold a compatible * RF_SHAREABLE allocation. */ if (cr == sc->core_region && cr->cr_addr == start && cr->cr_end == end && cr->cr_count == count) { rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Try to retain a region reference */ if ((error = chipc_retain_region(sc, cr, RF_ALLOCATED))) return (NULL); /* Make our rman reservation */ rv = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (rv == NULL) { chipc_release_region(sc, cr, RF_ALLOCATED); return (NULL); } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } static int chipc_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; struct resource_list_entry *rle; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(dev, type, rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_rl_release_resource(dev, child, type, rid, r)); } /* Locate the mapping region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Deactivate resources */ error = bus_generic_rman_release_resource(dev, child, type, rid, r); if (error != 0) return (error); /* Drop allocation reference */ chipc_release_region(sc, cr, RF_ALLOCATED); /* Clear reference from the resource list entry if exists */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; return (0); } static int -chipc_adjust_resource(device_t dev, device_t child, int type, +chipc_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; sc = device_get_softc(dev); /* Handled by parent bus? 
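* Resource types without a local rman (e.g. SYS_RES_IRQ) are forwarded * to the parent via bus_generic_adjust_resource().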
*/ - rm = chipc_get_rman(dev, type, rman_get_flags(r)); + rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { - return (bus_generic_adjust_resource(dev, child, type, r, start, - end)); + return (bus_generic_adjust_resource(dev, child, r, start, end)); } /* The range is limited to the existing region mapping */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); if (end <= start) return (EINVAL); if (start < cr->cr_addr || end > cr->cr_end) return (EINVAL); /* Range falls within the existing region */ return (rman_adjust_resource(r, start, end)); } /** * Retain an RF_ACTIVE reference to the region mapping @p r, and * configure @p r with its subregion values. * * @param dev The chipc device. * @param child Requesting child device. * @param type resource type of @p r. * @param rid resource id of @p r. * @param r resource to be activated. * @param req_direct If true, failure to allocate a direct bhnd resource * will be treated as an error. If false, the resource will not be marked * as RF_ACTIVE if bhnd direct resource allocation fails. */ static int chipc_try_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r, bool req_direct) { struct chipc_softc *sc = device_get_softc(dev); struct rman *rm; struct chipc_region *cr; bhnd_size_t cr_offset; rman_res_t r_start, r_end, r_size; int error; rm = chipc_get_rman(dev, type, rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) return (EINVAL); r_start = rman_get_start(r); r_end = rman_get_end(r); r_size = rman_get_size(r); /* Find the corresponding chipc region */ cr = chipc_find_region(sc, r_start, r_end); if (cr == NULL) return (EINVAL); /* Calculate subregion offset within the chipc region */ cr_offset = r_start - cr->cr_addr; /* Retain (and activate, if necessary) the chipc region */ if ((error = chipc_retain_region(sc, cr, RF_ACTIVE))) return (error); /* Configure child resource with its subregion values.
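* If the backing chipc region is mapped directly, the child resource * borrows the region's bus tag and handle at cr_offset and is marked * active; for an indirect region, the resource is left inactive unless * req_direct was set, in which case activation fails with ENOMEM.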
*/ if (cr->cr_res->direct) { error = chipc_init_child_resource(r, cr->cr_res->res, cr_offset, r_size); if (error) goto cleanup; /* Mark active */ if ((error = rman_activate_resource(r))) goto cleanup; } else if (req_direct) { error = ENOMEM; goto cleanup; } return (0); cleanup: chipc_release_region(sc, cr, RF_ACTIVE); return (error); } static int chipc_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct rman *rm; int error; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, type, rman_get_flags(r->res)); if (rm == NULL || !rman_is_region_manager(r->res, rm)) { return (bhnd_bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region resource */ error = chipc_try_activate_resource(dev, child, type, rid, r->res, false); if (error) return (error); /* Mark the child resource as direct according to the returned resource * state */ if (rman_get_flags(r->res) & RF_ACTIVE) r->direct = true; return (0); } static int chipc_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct rman *rm; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, type, rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region-based resource */ return (chipc_try_activate_resource(dev, child, type, rid, r, true)); } /** * Default chipc(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int chipc_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(dev, type, rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } /* Find the corresponding chipc region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Drop associated RF_ACTIVE reference */ chipc_release_region(sc, cr, RF_ACTIVE); return (0); } /** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); bus_topo_lock(); parent = device_get_parent(sc->dev); hostb = bhnd_bus_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { bus_topo_unlock(); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); bus_topo_unlock(); return (result); } static int chipc_enable_sprom(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Already enabled?
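* SPROM/OTP access is reference counted; only the 0->1 transition performs * pin muxing or OTP power-up. A hypothetical consumer (names assumed, not * taken from this file) would bracket its accesses with the generated * method wrappers: * * if ((error = BHND_CHIPC_ENABLE_SPROM(chipc)) != 0) * return (error); * ... read SPROM/OTP ... * BHND_CHIPC_DISABLE_SPROM(chipc);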
*/ if (sc->sprom_refcnt >= 1) { sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (0); } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: error = chipc_enable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: error = chipc_enable_otp_power(sc); break; default: error = 0; break; } /* Bump the reference count */ if (error == 0) sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (error); } static void chipc_disable_sprom(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Check reference count, skip disable if in-use. */ KASSERT(sc->sprom_refcnt > 0, ("sprom refcnt overrelease")); sc->sprom_refcnt--; if (sc->sprom_refcnt > 0) { CHIPC_UNLOCK(sc); return; } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: chipc_disable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: chipc_disable_otp_power(sc); break; default: break; } CHIPC_UNLOCK(sc); } static int chipc_enable_otp_power(struct chipc_softc *sc) { // TODO: Enable OTP resource via PMU, and wait up to 100 usec for // OTPS_READY to be set in `optstatus`. return (0); } static void chipc_disable_otp_power(struct chipc_softc *sc) { // TODO: Disable OTP resource via PMU } /** * If required by this device, enable access to the SPROM. * * @param sc chipc driver state. */ static int chipc_enable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins already enabled")); /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (0); /* Check whether bus is busy */ if (!chipc_should_enable_muxed_sprom(sc)) return (EBUSY); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return (0); } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } /* Refuse to proceed on unsupported devices with muxed SPROM pins */ device_printf(sc->dev, "muxed sprom lines on unrecognized device\n"); return (ENXIO); } /** * If required by this device, revert any GPIO/pin configuration applied * to allow SPROM access. * * @param sc chipc driver state. */ static void chipc_disable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; /* Nothing to do? 
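 *
 * (This is the inverse of chipc_enable_sprom_pins() above: the
 * CHIPC_CCTRL4331_EXTPA_EN* bits cleared there to expose the SPROM
 * are OR-ed back in below, returning the muxed lines to their normal
 * function.)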
*/
	if (!CHIPC_QUIRK(sc, MUX_SPROM))
		return;

	CHIPC_LOCK_ASSERT(sc, MA_OWNED);
	KASSERT(sc->sprom_refcnt == 0, ("sprom pins in use"));

	cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL);

	/* 4331 devices */
	if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) {
		cctrl |= CHIPC_CCTRL4331_EXTPA_EN;

		if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM))
			cctrl |= CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5;

		if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM))
			cctrl |= CHIPC_CCTRL4331_EXTPA_EN2;

		bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl);
		return;
	}

	/* 4360 devices */
	if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) {
		/* Unimplemented */
	}
}

static uint32_t
chipc_read_chipst(device_t dev)
{
	struct chipc_softc *sc = device_get_softc(dev);
	return (bhnd_bus_read_4(sc->core, CHIPC_CHIPST));
}

static void
chipc_write_chipctrl(device_t dev, uint32_t value, uint32_t mask)
{
	struct chipc_softc	*sc;
	uint32_t		 cctrl;

	sc = device_get_softc(dev);

	CHIPC_LOCK(sc);

	cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL);
	cctrl = (cctrl & ~mask) | (value & mask);
	bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl);

	CHIPC_UNLOCK(sc);
}

static struct chipc_caps *
chipc_get_caps(device_t dev)
{
	struct chipc_softc	*sc;

	sc = device_get_softc(dev);
	return (&sc->caps);
}

static device_method_t chipc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			chipc_probe),
	DEVMETHOD(device_attach,		chipc_attach),
	DEVMETHOD(device_detach,		chipc_detach),
	DEVMETHOD(device_suspend,		chipc_suspend),
	DEVMETHOD(device_resume,		chipc_resume),

	/* Bus interface */
	DEVMETHOD(bus_probe_nomatch,		chipc_probe_nomatch),
	DEVMETHOD(bus_print_child,		chipc_print_child),
	DEVMETHOD(bus_add_child,		chipc_add_child),
	DEVMETHOD(bus_child_deleted,		chipc_child_deleted),
	DEVMETHOD(bus_set_resource,		bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,		bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,		bus_generic_rl_delete_resource),
	DEVMETHOD(bus_alloc_resource,		chipc_alloc_resource),
	DEVMETHOD(bus_release_resource,		chipc_release_resource),
	DEVMETHOD(bus_adjust_resource,		chipc_adjust_resource),
	DEVMETHOD(bus_activate_resource,	chipc_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	chipc_deactivate_resource),
	DEVMETHOD(bus_get_resource_list,	chipc_get_resource_list),
	DEVMETHOD(bus_get_rman,			chipc_get_rman),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD(bus_config_intr,		bus_generic_config_intr),
	DEVMETHOD(bus_bind_intr,		bus_generic_bind_intr),
	DEVMETHOD(bus_describe_intr,		bus_generic_describe_intr),

	/* BHND bus interface */
	DEVMETHOD(bhnd_bus_activate_resource,	chipc_activate_bhnd_resource),

	/* ChipCommon interface */
	DEVMETHOD(bhnd_chipc_read_chipst,	chipc_read_chipst),
	DEVMETHOD(bhnd_chipc_write_chipctrl,	chipc_write_chipctrl),
	DEVMETHOD(bhnd_chipc_enable_sprom,	chipc_enable_sprom),
	DEVMETHOD(bhnd_chipc_disable_sprom,	chipc_disable_sprom),
	DEVMETHOD(bhnd_chipc_get_caps,		chipc_get_caps),

	DEVMETHOD_END
};

DEFINE_CLASS_0(bhnd_chipc, bhnd_chipc_driver, chipc_methods,
    sizeof(struct chipc_softc));
EARLY_DRIVER_MODULE(bhnd_chipc, bhnd, bhnd_chipc_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
MODULE_DEPEND(bhnd_chipc, bhnd, 1, 1, 1);
MODULE_VERSION(bhnd_chipc, 1);
diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c
index 6d11e50b1f98..0dbb282399ae 100644
--- a/sys/dev/dpaa2/dpaa2_mc.c
+++ b/sys/dev/dpaa2/dpaa2_mc.c
@@ -1,892 +1,892 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 *
modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Management Complex (MC) bus driver. * * MC is a hardware resource manager which can be found in several NXP * SoCs (LX2160A, for example) and provides an access to the specialized * hardware objects used in network-oriented packet processing applications. */ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #include #include #endif #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" /* Macros to read/write MC registers */ #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) #define IORT_DEVICE_NAME "MCE" /* MC Registers */ #define MC_REG_GCR1 0x0000u #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */ #define MC_REG_GSR 0x0008u #define MC_REG_FAPR 0x0028u /* General Control Register 1 (GCR1) */ #define GCR1_P1_STOP 0x80000000u #define GCR1_P2_STOP 0x40000000u /* General Status Register (GSR) */ #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) #define GSR_MCS(v) (((v) & 0xFFu) >> 0) /* Timeouts to wait for the MC status. */ #define MC_STAT_TIMEOUT 1000u /* us */ #define MC_STAT_ATTEMPTS 100u /** * @brief Structure to describe a DPAA2 device as a managed resource. */ struct dpaa2_mc_devinfo { STAILQ_ENTRY(dpaa2_mc_devinfo) link; device_t dpaa2_dev; uint32_t flags; uint32_t owners; }; MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); static struct resource_spec dpaa2_mc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, RESOURCE_SPEC_END }; static u_int dpaa2_mc_get_xref(device_t, device_t); static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, uint32_t *); /* * For device interface. 
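 *
 * For example, a GSR read of 0x80000001 decodes via the helpers above
 * as GSR_HW_ERR() == 1, GSR_CAT_ERR() == 0 and GSR_MCS() == 0x01; the
 * attach routine below polls until GSR_MCS() is non-zero before it
 * trusts the command portal.
 *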
*/ int dpaa2_mc_attach(device_t dev) { struct dpaa2_mc_softc *sc; struct resource_map_request req; uint32_t val; int error; sc = device_get_softc(dev); sc->dev = dev; sc->msi_allocated = false; sc->msi_owner = NULL; error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources\n", __func__); return (ENXIO); } if (sc->res[1]) { resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], &req, &sc->map[1]); if (error) { device_printf(dev, "%s: failed to map control " "registers\n", __func__); dpaa2_mc_detach(dev); return (ENXIO); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ val = mcreg_read_4(sc, MC_REG_GCR1) & ~(GCR1_P1_STOP | GCR1_P2_STOP); mcreg_write_4(sc, MC_REG_GCR1, val); /* Poll MC status. */ if (bootverbose) device_printf(dev, "polling MC status...\n"); for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { val = mcreg_read_4(sc, MC_REG_GSR); if (GSR_MCS(val) != 0u) break; DELAY(MC_STAT_TIMEOUT); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); } /* At least 64 bytes of the command portal should be available. */ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { device_printf(dev, "%s: MC portal memory region too small: " "%jd\n", __func__, rman_get_size(sc->res[0])); dpaa2_mc_detach(dev); return (ENXIO); } /* Map MC portal memory resource. */ resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req, &sc->map[0]); if (error) { device_printf(dev, "Failed to map MC portal memory\n"); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 I/O objects. */ sc->dpio_rman.rm_type = RMAN_ARRAY; sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; error = rman_init(&sc->dpio_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 I/O objects: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 buffer pools. */ sc->dpbp_rman.rm_type = RMAN_ARRAY; sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; error = rman_init(&sc->dpbp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 buffer pools: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 concentrators. */ sc->dpcon_rman.rm_type = RMAN_ARRAY; sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; error = rman_init(&sc->dpcon_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 concentrators: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 MC portals. */ sc->dpmcp_rman.rm_type = RMAN_ARRAY; sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; error = rman_init(&sc->dpmcp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 MC portals: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a list of non-allocatable DPAA2 devices. 
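 *
 * The four rman instances initialized above follow the standard
 * rman(9) life cycle; for reference (sketch only, not additional
 * driver code):
 *
 *	rman_init(&rm);
 *	rman_manage_region(&rm, start, end);
 *	r = rman_reserve_resource(&rm, start, end, count, flags, dev);
 *	...
 *	rman_release_resource(r);
 *	rman_fini(&rm);
 *
 * Here each managed "region" is a DPAA2 object rather than an address
 * range; see dpaa2_mc_manage_dev() below.
 *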
*/ mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); STAILQ_INIT(&sc->mdev_list); mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); /* * Add a root resource container as the only child of the bus. All of * the direct descendant containers will be attached to the root one * instead of the MC device. */ sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); if (sc->rcdev == NULL) { dpaa2_mc_detach(dev); return (ENXIO); } bus_generic_probe(dev); bus_generic_attach(dev); return (0); } int dpaa2_mc_detach(device_t dev) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; bus_generic_detach(dev); sc = device_get_softc(dev); if (sc->rcdev) device_delete_child(dev, sc->rcdev); bus_release_resources(dev, dpaa2_mc_spec, sc->res); dinfo = device_get_ivars(dev); if (dinfo) free(dinfo, M_DPAA2_MC); error = bus_generic_detach(dev); if (error != 0) return (error); return (device_delete_children(dev)); } /* * For bus interface. */ struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; struct rman *rm; int error; rm = dpaa2_mc_rman(mcdev, type, flags); if (rm == NULL) return (bus_generic_alloc_resource(mcdev, child, type, rid, start, end, count, flags)); /* * Skip managing DPAA2-specific resource. It must be provided to MC by * calling DPAA2_MC_MANAGE_DEV() beforehand. */ if (type <= DPAA2_DEV_MC) { error = rman_manage_region(rm, start, end); if (error) { device_printf(mcdev, "rman_manage_region() failed: " "start=%#jx, end=%#jx, error=%d\n", start, end, error); goto fail; } } res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start, end, count, flags); if (res == NULL) goto fail; return (res); fail: device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); return (NULL); } int -dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type, +dpaa2_mc_adjust_resource(device_t mcdev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; - rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); + rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) - return (bus_generic_rman_adjust_resource(mcdev, child, type, r, + return (bus_generic_rman_adjust_resource(mcdev, child, r, start, end)); - return (bus_generic_adjust_resource(mcdev, child, type, r, start, end)); + return (bus_generic_adjust_resource(mcdev, child, r, start, end)); } int dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); if (rm) return (bus_generic_rman_release_resource(mcdev, child, type, rid, r)); return (bus_generic_release_resource(mcdev, child, type, rid, r)); } int dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); if (rm) return (bus_generic_rman_activate_resource(mcdev, child, type, rid, r)); return (bus_generic_activate_resource(mcdev, child, type, rid, r)); } int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); if (rm) return (bus_generic_rman_deactivate_resource(mcdev, child, type, rid, r)); return (bus_generic_deactivate_resource(mcdev, child, type, rid, r)); } /* * 
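A note on the resource methods above: after this change the adjust
 * method no longer receives a "type" argument and recovers it from the
 * resource itself,
 *
 *	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
 *
 * so a resource cannot be adjusted against an rman of the wrong type.
 * The release, activate and deactivate methods above still take
 * type/rid in this revision.
 *
 *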
For pseudo-pcib interface. */ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { #if defined(INTRNG) return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); #else return (ENXIO); #endif } int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id) { struct dpaa2_devinfo *dinfo; dinfo = device_get_ivars(child); if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (ENXIO); if (type == PCI_ID_MSI) return (dpaa2_mc_map_id(mcdev, child, id)); *id = dinfo->icid; return (0); } /* * For DPAA2 Management Complex bus driver interface. */ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; struct rman *rm; int error; sc = device_get_softc(mcdev); dinfo = device_get_ivars(dpaa2_dev); if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); if (!di) return (ENOMEM); di->dpaa2_dev = dpaa2_dev; di->flags = flags; di->owners = 0; /* Append a new managed DPAA2 device to the queue. */ mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); mtx_unlock(&sc->mdev_lock); if (flags & DPAA2_MC_DEV_ALLOCATABLE) { /* Select rman based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); if (!rm) return (ENOENT); /* Manage DPAA2 device as an allocatable resource. */ error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev); if (error) return (error); } return (0); } int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct rman *rm; rman_res_t start, end; int error; if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); /* Select resource manager based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, devtype, 0); if (!rm) return (ENOENT); /* Find first free DPAA2 device of the given type. 
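 *
 * (dpaa2_mc_manage_dev() above registered each allocatable object as
 * a one-unit rman region whose "address" is the device_t pointer
 * itself:
 *
 *	rman_manage_region(rm, (rman_res_t)dpaa2_dev, (rman_res_t)dpaa2_dev);
 *
 * so a free region found here necessarily has start == end == that
 * pointer, which the KASSERT below re-checks.)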
*/ error = rman_first_free_region(rm, &start, &end); if (error) return (error); KASSERT(start == end, ("start != end, but should be the same pointer " "to the DPAA2 device: start=%jx, end=%jx", start, end)); *dpaa2_dev = (device_t) start; return (0); } int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if (dinfo->dtype == devtype && dinfo->id == obj_id) { *dpaa2_dev = di->dpaa2_dev; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; device_t dev = NULL; uint32_t owners = UINT32_MAX; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if ((dinfo->dtype == devtype) && (di->flags & DPAA2_MC_DEV_SHAREABLE) && (di->owners < owners)) { dev = di->dpaa2_dev; owners = di->owners; } } if (dev) { *dpaa2_dev = dev; error = 0; } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners++; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners -= di->owners > 0 ? 1 : 0; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } /** * @internal */ static u_int dpaa2_mc_get_xref(device_t mcdev, device_t child) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); #ifdef DEV_ACPI u_int xref, devid; #endif #ifdef FDT phandle_t msi_parent; #endif int error; if (sc && dinfo) { #ifdef DEV_ACPI if (sc->acpi_based) { /* * NOTE: The first named component from the IORT table * with the given name (as a substring) will be used. */ error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error) return (0); return (xref); } #endif #ifdef FDT if (!sc->acpi_based) { /* FDT-based driver. 
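 *
 * (ofw_bus_msimap() consults the standard "msi-map" property on the
 * MC node; a hypothetical binding such as
 *
 *	msi-map = <0 &its 0 0x10000>;
 *
 * would map every ICID in 0..0xffff to the interrupt controller whose
 * xref is returned here.)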
		error = ofw_bus_msimap(sc->ofw_node, dinfo->icid,
		    &msi_parent, NULL);
		if (error)
			return (0);
		return ((u_int) msi_parent);
	}
#endif
	}
	return (0);
}

/**
 * @internal
 */
static u_int
dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;
#ifdef DEV_ACPI
	u_int xref, devid;
	int error;
#endif
	dinfo = device_get_ivars(child);
	if (dinfo) {
		/*
		 * The first named component from the IORT table with the
		 * given name (as a substring) will be used.
		 */
#ifdef DEV_ACPI
		error = acpi_iort_map_named_msi(IORT_DEVICE_NAME,
		    dinfo->icid, &xref, &devid);
		if (error == 0)
			*id = devid;
		else
#endif
			*id = dinfo->icid; /* RID not in IORT, likely FW bug */

		return (0);
	}
	return (ENXIO);
}

/**
 * @internal
 * @brief Obtain a resource manager based on the given type of the resource.
 */
struct rman *
dpaa2_mc_rman(device_t mcdev, int type, u_int flags)
{
	struct dpaa2_mc_softc *sc;

	sc = device_get_softc(mcdev);
	switch (type) {
	case DPAA2_DEV_IO:
		return (&sc->dpio_rman);
	case DPAA2_DEV_BP:
		return (&sc->dpbp_rman);
	case DPAA2_DEV_CON:
		return (&sc->dpcon_rman);
	case DPAA2_DEV_MCP:
		return (&sc->dpmcp_rman);
	default:
		break;
	}
	return (NULL);
}

#if defined(INTRNG) && !defined(IOMMU)

/**
 * @internal
 * @brief Allocates the requested number of MSIs.
 *
 * NOTE: This function is part of a fallback solution when IOMMU isn't
 * available. Total number of IRQs is limited to 32.
 */
static int
dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	int msi_irqs[DPAA2_MC_MSI_COUNT];
	int error;

	/* Pre-allocate a bunch of MSIs for MC to be used by its children. */
	if (!sc->msi_allocated) {
		error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev,
		    child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs);
		if (error) {
			device_printf(mcdev, "failed to pre-allocate %d MSIs: "
			    "error=%d\n", DPAA2_MC_MSI_COUNT, error);
			return (error);
		}

		mtx_assert(&sc->msi_lock, MA_NOTOWNED);
		mtx_lock(&sc->msi_lock);
		for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
			sc->msi[i].child = NULL;
			sc->msi[i].irq = msi_irqs[i];
		}
		sc->msi_owner = child;
		sc->msi_allocated = true;
		mtx_unlock(&sc->msi_lock);
	}

	error = ENOENT;

	/* Find the first free MSIs in the pre-allocated pool. */
	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child != NULL)
			continue;
		error = 0;
		for (int j = 0; j < count; j++) {
			if (i + j >= DPAA2_MC_MSI_COUNT) {
				device_printf(mcdev, "requested %d MSIs exceed "
				    "limit of %d available\n", count,
				    DPAA2_MC_MSI_COUNT);
				error = E2BIG;
				break;
			}
			sc->msi[i + j].child = child;
			irqs[j] = sc->msi[i + j].irq;
		}
		break;
	}
	mtx_unlock(&sc->msi_lock);

	return (error);
}

/**
 * @internal
 * @brief Marks IRQs as free in the pre-allocated pool of MSIs.
 *
 * NOTE: This function is part of a fallback solution when IOMMU isn't
 * available. Total number of IRQs is limited to 32.
 * NOTE: MSIs are kept allocated in the kernel as part of the pool.
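 *
 * For example, with slot 0 of sc->msi[] already bound and a child
 * requesting count = 2, the scan in dpaa2_mc_alloc_msi_impl() above
 * stops at the first free slot (1) and hands out sc->msi[1].irq and
 * sc->msi[2].irq, binding both slots to that child; the release path
 * below only clears those bindings and never frees the underlying
 * MSIs.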
*/ static int dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != child) continue; for (int j = 0; j < count; j++) { if (sc->msi[i].irq == irqs[j]) { sc->msi[i].child = NULL; break; } } } mtx_unlock(&sc->msi_lock); return (0); } /** * @internal * @brief Provides address to write to and data according to the given MSI from * the pre-allocated pool. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int error = EINVAL; mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child == child && sc->msi[i].irq == irq) { error = 0; break; } } mtx_unlock(&sc->msi_lock); if (error) return (error); return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, sc->msi_owner), irq, addr, data)); } #endif /* defined(INTRNG) && !defined(IOMMU) */ static device_method_t dpaa2_mc_methods[] = { DEVMETHOD_END }; DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, sizeof(struct dpaa2_mc_softc)); diff --git a/sys/dev/dpaa2/dpaa2_mc.h b/sys/dev/dpaa2/dpaa2_mc.h index 40b318c4c9e7..23b18f8d2ca6 100644 --- a/sys/dev/dpaa2/dpaa2_mc.h +++ b/sys/dev/dpaa2/dpaa2_mc.h @@ -1,219 +1,219 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_MC_H #define _DPAA2_MC_H #include #include #include #include #include #include "pci_if.h" #include "dpaa2_types.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_ni.h" #include "dpaa2_io.h" #include "dpaa2_mac.h" #include "dpaa2_con.h" #include "dpaa2_bp.h" /* * Maximum number of MSIs supported by the MC for its children without IOMMU. * * TODO: Should be much more with IOMMU translation. */ #define DPAA2_MC_MSI_COUNT 32 /* Flags for DPAA2 devices as resources. 
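 *
 * The flags below may be OR-ed together when an object is handed to
 * the bus, e.g. (hypothetical call shown for illustration only):
 *
 *	DPAA2_MC_MANAGE_DEV(mcdev, dpio_dev, DPAA2_MC_DEV_ALLOCATABLE |
 *	    DPAA2_MC_DEV_SHAREABLE);
 *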
*/ #define DPAA2_MC_DEV_ALLOCATABLE 0x01u /* to be managed by DPAA2-specific rman */ #define DPAA2_MC_DEV_ASSOCIATED 0x02u /* to obtain info about DPAA2 device */ #define DPAA2_MC_DEV_SHAREABLE 0x04u /* to be shared among DPAA2 devices */ struct dpaa2_mc_devinfo; /* about managed DPAA2 devices */ /** * @brief Software context for the DPAA2 Management Complex (MC) driver. * * dev: Device associated with this software context. * rcdev: Child device associated with the root resource container. * acpi_based: Attached using ACPI (true) or FDT (false). * ofw_node: FDT node of the Management Complex (acpi_based == false). * * res: Unmapped MC command portal and control registers resources. * map: Mapped MC command portal and control registers resources. * * dpio_rman: I/O objects resource manager. * dpbp_rman: Buffer Pools resource manager. * dpcon_rman: Concentrators resource manager. * dpmcp_rman: MC portals resource manager. */ struct dpaa2_mc_softc { device_t dev; device_t rcdev; bool acpi_based; phandle_t ofw_node; struct resource *res[2]; struct resource_map map[2]; /* For allocatable managed DPAA2 objects. */ struct rman dpio_rman; struct rman dpbp_rman; struct rman dpcon_rman; struct rman dpmcp_rman; /* For managed DPAA2 objects. */ struct mtx mdev_lock; STAILQ_HEAD(, dpaa2_mc_devinfo) mdev_list; /* NOTE: Workaround in case of no IOMMU available. */ #ifndef IOMMU device_t msi_owner; bool msi_allocated; struct mtx msi_lock; struct { device_t child; int irq; } msi[DPAA2_MC_MSI_COUNT]; #endif }; /** * @brief Software context for the DPAA2 Resource Container (RC) driver. * * dev: Device associated with this software context. * portal: Helper object to send commands to the MC portal. * unit: Helps to distinguish between root (0) and child DRPCs. * cont_id: Container ID. */ struct dpaa2_rc_softc { device_t dev; int unit; uint32_t cont_id; }; /** * @brief Information about MSI messages supported by the DPAA2 object. * * msi_msgnum: Number of MSI messages supported by the DPAA2 object. * msi_alloc: Number of MSI messages allocated for the DPAA2 object. * msi_handlers: Number of MSI message handlers configured. */ struct dpaa2_msinfo { uint8_t msi_msgnum; uint8_t msi_alloc; uint32_t msi_handlers; }; /** * @brief Information about DPAA2 device. * * pdev: Parent device. * dev: Device this devinfo is associated with. * * id: ID of a logical DPAA2 object resource. * portal_id: ID of the MC portal which belongs to the object's container. * icid: Isolation context ID of the DPAA2 object. It is shared * between a resource container and all of its children. * * dtype: Type of the DPAA2 object. * resources: Resources available for this DPAA2 device. * msi: Information about MSI messages supported by the DPAA2 object. */ struct dpaa2_devinfo { device_t pdev; device_t dev; uint32_t id; uint32_t portal_id; uint32_t icid; enum dpaa2_dev_type dtype; struct resource_list resources; struct dpaa2_msinfo msi; /* * DPAA2 object might or might not have its own portal allocated to * execute MC commands. If the portal has been allocated, it takes * precedence over the portal owned by the resource container. */ struct dpaa2_mcp *portal; }; DECLARE_CLASS(dpaa2_mc_driver); /* For device interface. */ int dpaa2_mc_attach(device_t dev); int dpaa2_mc_detach(device_t dev); /* For bus interface. 
*/ struct rman *dpaa2_mc_rman(device_t mcdev, int type, u_int flags); struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type, +int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); int dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r); int dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r); int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r); /* For pseudo-pcib interface. */ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs); int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs); int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data); int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id); /* For DPAA2 MC bus interface. */ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags); int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id); int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype); #endif /* _DPAA2_MC_H */ diff --git a/sys/dev/fdt/simplebus.c b/sys/dev/fdt/simplebus.c index ceb5fdde4bb7..940f93f56274 100644 --- a/sys/dev/fdt/simplebus.c +++ b/sys/dev/fdt/simplebus.c @@ -1,607 +1,595 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include /* * Bus interface. 
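 *
 * Note that this change also deletes simplebus's private
 * adjust_resource shim: all it did was rewrite SYS_RES_IOPORT to
 * SYS_RES_MEMORY before delegating, and since the type recorded in
 * the resource at allocation time is already the translated one,
 * plain bus_generic_adjust_resource now suffices.
 *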
*/ static int simplebus_probe(device_t dev); static struct resource *simplebus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int simplebus_adjust_resource(device_t bus, device_t child, - int type, struct resource *r, rman_res_t start, rman_res_t end); static int simplebus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); static int simplebus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r); static int simplebus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r); static int simplebus_map_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map); static int simplebus_unmap_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map *map); static void simplebus_probe_nomatch(device_t bus, device_t child); static int simplebus_print_child(device_t bus, device_t child); static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit); static struct resource_list *simplebus_get_resource_list(device_t bus, device_t child); static ssize_t simplebus_get_property(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); /* * ofw_bus interface */ static const struct ofw_bus_devinfo *simplebus_get_devinfo(device_t bus, device_t child); /* * Driver methods. */ static device_method_t simplebus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, simplebus_probe), DEVMETHOD(device_attach, simplebus_attach), DEVMETHOD(device_detach, simplebus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, simplebus_add_child), DEVMETHOD(bus_print_child, simplebus_print_child), DEVMETHOD(bus_probe_nomatch, simplebus_probe_nomatch), DEVMETHOD(bus_read_ivar, bus_generic_read_ivar), DEVMETHOD(bus_write_ivar, bus_generic_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, simplebus_alloc_resource), DEVMETHOD(bus_release_resource, simplebus_release_resource), DEVMETHOD(bus_activate_resource, simplebus_activate_resource), DEVMETHOD(bus_deactivate_resource, simplebus_deactivate_resource), - DEVMETHOD(bus_adjust_resource, simplebus_adjust_resource), + DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_map_resource, simplebus_map_resource), DEVMETHOD(bus_unmap_resource, simplebus_unmap_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_get_resource_list, simplebus_get_resource_list), DEVMETHOD(bus_get_property, simplebus_get_property), DEVMETHOD(bus_get_device_path, ofw_bus_gen_get_device_path), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, simplebus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_0(simplebus, simplebus_driver, simplebus_methods, 
sizeof(struct simplebus_softc));

EARLY_DRIVER_MODULE(simplebus, ofwbus, simplebus_driver, 0, 0, BUS_PASS_BUS);
EARLY_DRIVER_MODULE(simplebus, simplebus, simplebus_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);

static int
simplebus_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	/*
	 * XXX We should attach only to a pure 'compatible = "simple-bus"',
	 * without any other compatible string.
	 * For now, filter only known cases:
	 *    "syscon", "simple-bus"; is handled by fdt/syscon driver
	 *    "simple-mfd", "simple-bus"; is handled by fdt/simple-mfd driver
	 */
	if (ofw_bus_is_compatible(dev, "syscon") ||
	    ofw_bus_is_compatible(dev, "simple-mfd"))
		return (ENXIO);

	/*
	 * FDT data puts a "simple-bus" compatible string on many things that
	 * have children but aren't really buses in our world. Without a
	 * ranges property we will fail to attach, so just fail to probe too.
	 */
	if (!(ofw_bus_is_compatible(dev, "simple-bus") &&
	    ofw_bus_has_prop(dev, "ranges")) &&
	    (ofw_bus_get_type(dev) == NULL ||
	    strcmp(ofw_bus_get_type(dev), "soc") != 0))
		return (ENXIO);

	device_set_desc(dev, "Flattened device tree simple bus");

	return (BUS_PROBE_GENERIC);
}

int
simplebus_attach_impl(device_t dev)
{
	struct simplebus_softc *sc;
	phandle_t node;

	sc = device_get_softc(dev);
	simplebus_init(dev, 0);
	if ((sc->flags & SB_FLAG_NO_RANGES) == 0 &&
	    simplebus_fill_ranges(sc->node, sc) < 0) {
		device_printf(dev, "could not get ranges\n");
		return (ENXIO);
	}

	/*
	 * In principle, simplebus could have an interrupt map, but ignore that
	 * for now.
	 */
	for (node = OF_child(sc->node); node > 0; node = OF_peer(node))
		simplebus_add_device(dev, node, 0, NULL, -1, NULL);

	return (0);
}

int
simplebus_attach(device_t dev)
{
	int rv;

	rv = simplebus_attach_impl(dev);
	if (rv != 0)
		return (rv);

	return (bus_generic_attach(dev));
}

int
simplebus_detach(device_t dev)
{
	struct simplebus_softc *sc;

	sc = device_get_softc(dev);
	if (sc->ranges != NULL)
		free(sc->ranges, M_DEVBUF);

	return (bus_generic_detach(dev));
}

void
simplebus_init(device_t dev, phandle_t node)
{
	struct simplebus_softc *sc;

	sc = device_get_softc(dev);
	if (node == 0)
		node = ofw_bus_get_node(dev);
	sc->dev = dev;
	sc->node = node;

	/*
	 * Some important numbers
	 */
	sc->acells = 2;
	OF_getencprop(node, "#address-cells", &sc->acells, sizeof(sc->acells));
	sc->scells = 1;
	OF_getencprop(node, "#size-cells", &sc->scells, sizeof(sc->scells));
}

int
simplebus_fill_ranges(phandle_t node, struct simplebus_softc *sc)
{
	int host_address_cells;
	cell_t *base_ranges;
	ssize_t nbase_ranges;
	int err;
	int i, j, k;

	err = OF_searchencprop(OF_parent(node), "#address-cells",
	    &host_address_cells, sizeof(host_address_cells));
	if (err <= 0)
		return (-1);

	nbase_ranges = OF_getproplen(node, "ranges");
	if (nbase_ranges < 0)
		return (-1);
	sc->nranges = nbase_ranges / sizeof(cell_t) /
	    (sc->acells + host_address_cells + sc->scells);
	if (sc->nranges == 0)
		return (0);

	sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]),
	    M_DEVBUF, M_WAITOK);
	base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK);
	OF_getencprop(node, "ranges", base_ranges, nbase_ranges);

	for (i = 0, j = 0; i < sc->nranges; i++) {
		sc->ranges[i].bus = 0;
		for (k = 0; k < sc->acells; k++) {
			sc->ranges[i].bus <<= 32;
			sc->ranges[i].bus |= base_ranges[j++];
		}
		sc->ranges[i].host = 0;
		for (k = 0; k < host_address_cells; k++) {
			sc->ranges[i].host <<= 32;
			sc->ranges[i].host |= base_ranges[j++];
		}
		sc->ranges[i].size = 0;
		for (k = 0; k < sc->scells; k++) {
			sc->ranges[i].size <<= 32;
			sc->ranges[i].size |= base_ranges[j++];
		}
	}

	free(base_ranges, M_DEVBUF);
	return (sc->nranges);
}
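/*
 * Worked example (hedged; single-cell addresses and sizes chosen for
 * brevity): given a simplebus node with
 *
 *	#address-cells = <1>;
 *	#size-cells = <1>;
 *	ranges = <0x0 0x40000000 0x1000000>;
 *
 * simplebus_fill_ranges() above records { bus = 0x0, host = 0x40000000,
 * size = 0x1000000 }, and a child with reg = <0x100 0x20> is remapped
 * by simplebus_alloc_resource() below to the host range
 * 0x40000100-0x4000011f.
 */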
struct simplebus_devinfo * simplebus_setup_dinfo(device_t dev, phandle_t node, struct simplebus_devinfo *di) { struct simplebus_softc *sc; struct simplebus_devinfo *ndi; sc = device_get_softc(dev); if (di == NULL) ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); else ndi = di; if (ofw_bus_gen_setup_devinfo(&ndi->obdinfo, node) != 0) { if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } resource_list_init(&ndi->rl); ofw_bus_reg_to_rl(dev, node, sc->acells, sc->scells, &ndi->rl); ofw_bus_intr_to_rl(dev, node, &ndi->rl, NULL); return (ndi); } device_t simplebus_add_device(device_t dev, phandle_t node, u_int order, const char *name, int unit, struct simplebus_devinfo *di) { struct simplebus_devinfo *ndi; device_t cdev; if ((ndi = simplebus_setup_dinfo(dev, node, di)) == NULL) return (NULL); cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", ndi->obdinfo.obd_name); resource_list_free(&ndi->rl); ofw_bus_gen_destroy_devinfo(&ndi->obdinfo); if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } device_set_ivars(cdev, ndi); return(cdev); } static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t cdev; struct simplebus_devinfo *ndi; cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == NULL) return (NULL); ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); ndi->obdinfo.obd_node = -1; resource_list_init(&ndi->rl); device_set_ivars(cdev, ndi); return (cdev); } static const struct ofw_bus_devinfo * simplebus_get_devinfo(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->obdinfo); } static struct resource_list * simplebus_get_resource_list(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->rl); } static ssize_t simplebus_get_property(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { phandle_t node, xref; ssize_t ret, i; uint32_t *buffer; uint64_t val; switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: case DEVICE_PROP_HANDLE: break; default: return (-1); } node = ofw_bus_get_node(child); if (propvalue == NULL || size == 0) return (OF_getproplen(node, propname)); /* * Integer values are stored in BE format. * If caller declared that the underlying property type is uint32_t * we need to do the conversion to match host endianness. */ if (type == DEVICE_PROP_UINT32) return (OF_getencprop(node, propname, propvalue, size)); /* * uint64_t also requires endianness handling. * In FDT every 8 byte value is stored using two uint32_t variables * in BE format. Now, since the upper bits are stored as the first * of the pair, both halves require swapping. 
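 *
 * For example, the property cells <0x00000001 0x00000002> decode below
 * to the host value 0x0000000100000002.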
*/ if (type == DEVICE_PROP_UINT64) { ret = OF_getencprop(node, propname, propvalue, size); if (ret <= 0) { return (ret); } buffer = (uint32_t *)propvalue; for (i = 0; i < size / 4; i += 2) { val = (uint64_t)buffer[i] << 32 | buffer[i + 1]; ((uint64_t *)buffer)[i / 2] = val; } return (ret); } if (type == DEVICE_PROP_HANDLE) { if (size < sizeof(node)) return (-1); ret = OF_getencprop(node, propname, &xref, sizeof(xref)); if (ret <= 0) return (ret); node = OF_node_from_xref(xref); if (propvalue != NULL) *(uint32_t *)propvalue = node; return (ret); } return (OF_getprop(node, propname, propvalue, size)); } static struct resource * simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct simplebus_softc *sc; struct simplebus_devinfo *di; struct resource_list_entry *rle; int j; sc = device_get_softc(bus); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. */ if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); rle = resource_list_find(&di->rl, type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; end = rle->end; count = rle->count; } if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#jx-%#jx\n", start, end); return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } -static int -simplebus_adjust_resource(device_t bus, device_t child, int type, - struct resource *r, rman_res_t start, rman_res_t end) -{ - - if (type == SYS_RES_IOPORT) - type = SYS_RES_MEMORY; - return (bus_generic_adjust_resource(bus, child, type, r, start, end)); -} - static int simplebus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; return (bus_generic_release_resource(bus, child, type, rid, r)); } static int simplebus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; return (bus_generic_activate_resource(bus, child, type, rid, r)); } static int simplebus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; return (bus_generic_deactivate_resource(bus, child, type, rid, r)); } static int simplebus_map_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; return (bus_generic_map_resource(bus, child, type, r, args, map)); } static int simplebus_unmap_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map *map) { if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; return (bus_generic_unmap_resource(bus, child, type, r, map)); } static int simplebus_print_res(struct simplebus_devinfo *di) { int rv; if (di == NULL) return (0); rv = 0; rv += 
resource_list_print_type(&di->rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(&di->rl, "irq", SYS_RES_IRQ, "%jd"); return (rv); } static void simplebus_probe_nomatch(device_t bus, device_t child) { const char *name, *type, *compat; if (!bootverbose) return; compat = ofw_bus_get_compat(child); if (compat == NULL) return; name = ofw_bus_get_name(child); type = ofw_bus_get_type(child); device_printf(bus, "<%s>", name != NULL ? name : "unknown"); simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) printf(" disabled"); if (type) printf(" type %s", type); printf(" compat %s (no driver attached)\n", compat); } static int simplebus_print_child(device_t bus, device_t child) { int rv; rv = bus_print_child_header(bus, child); rv += simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) rv += printf(" disabled"); rv += bus_print_child_footer(bus, child); return (rv); } diff --git a/sys/dev/hyperv/pcib/vmbus_pcib.c b/sys/dev/hyperv/pcib/vmbus_pcib.c index 3fa349b0c0c5..3d3041ee76b3 100644 --- a/sys/dev/hyperv/pcib/vmbus_pcib.c +++ b/sys/dev/hyperv/pcib/vmbus_pcib.c @@ -1,2049 +1,2049 @@ /*- * Copyright (c) 2016-2017 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #ifdef NEW_PCIB #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #if defined(__i386__) || defined(__amd64__) #include #include #include #endif #if defined(__aarch64__) #include #include #include #include #endif #include #include #include #include #include "vmbus_if.h" struct completion { unsigned int done; struct mtx lock; }; static void init_completion(struct completion *c) { memset(c, 0, sizeof(*c)); mtx_init(&c->lock, "hvcmpl", NULL, MTX_DEF); c->done = 0; } static void reinit_completion(struct completion *c) { c->done = 0; } static void free_completion(struct completion *c) { mtx_destroy(&c->lock); } static void complete(struct completion *c) { mtx_lock(&c->lock); c->done++; mtx_unlock(&c->lock); wakeup(c); } static void wait_for_completion(struct completion *c) { mtx_lock(&c->lock); while (c->done == 0) mtx_sleep(c, &c->lock, 0, "hvwfc", 0); c->done--; mtx_unlock(&c->lock); } /* * Return: 0 if completed, a non-zero value if timed out. */ static int wait_for_completion_timeout(struct completion *c, int timeout) { int ret; mtx_lock(&c->lock); if (c->done == 0) mtx_sleep(c, &c->lock, 0, "hvwfc", timeout); if (c->done > 0) { c->done--; ret = 0; } else { ret = 1; } mtx_unlock(&c->lock); return (ret); } #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define PCI_MAKE_VERSION(major, minor) ((uint32_t)(((major) << 16) | (minor))) enum pci_protocol_version_t { PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4), }; static enum pci_protocol_version_t pci_protocol_versions[] = { PCI_PROTOCOL_VERSION_1_4, PCI_PROTOCOL_VERSION_1_1, }; #define PCI_CONFIG_MMIO_LENGTH 0x2000 #define CFG_PAGE_OFFSET 0x1000 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) /* * Message Types */ enum pci_message_type { /* * Version 1.1 */ PCI_MESSAGE_BASE = 0x42490000, PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0, PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1, PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4, PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5, PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6, PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7, PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8, PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9, PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA, PCI_EJECT = PCI_MESSAGE_BASE + 0xB, PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC, PCI_REENABLE = PCI_MESSAGE_BASE + 0xD, PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE, PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF, PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10, PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11, PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12, PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19, PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A, PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B, PCI_MESSAGE_MAXIMUM }; #define STATUS_REVISION_MISMATCH 0xC0000059 /* * Structures defining the virtual PCI Express protocol. 
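 *
 * (Usage pattern for the completion shim defined above, mirroring how
 * this driver employs it throughout:
 *
 *	struct completion c;
 *
 *	init_completion(&c);
 *	... hand &c to a response callback that calls complete(&c) ...
 *	wait_for_completion(&c);	(or wait_for_completion_timeout(&c, hz))
 *	free_completion(&c);
 *
 * reinit_completion() lets one context be reused across requests.)
 *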
*/ union pci_version { struct { uint16_t minor_version; uint16_t major_version; } parts; uint32_t version; } __packed; /* * This representation is the one used in Windows, which is * what is expected when sending this back and forth with * the Hyper-V parent partition. */ union win_slot_encoding { struct { uint32_t slot:5; uint32_t func:3; uint32_t reserved:24; } bits; uint32_t val; } __packed; struct pci_func_desc { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ } __packed; struct pci_func_desc2 { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ uint32_t flags; uint16_t virtual_numa_node; uint16_t reserved; } __packed; struct hv_msi_desc { uint8_t vector; uint8_t delivery_mode; uint16_t vector_count; uint32_t reserved; uint64_t cpu_mask; } __packed; struct hv_msi_desc3 { uint32_t vector; uint8_t delivery_mode; uint8_t reserved; uint16_t vector_count; uint16_t processor_count; uint16_t processor_array[32]; } __packed; struct tran_int_desc { uint16_t reserved; uint16_t vector_count; uint32_t data; uint64_t address; } __packed; struct pci_message { uint32_t type; } __packed; struct pci_child_message { struct pci_message message_type; union win_slot_encoding wslot; } __packed; struct pci_incoming_message { struct vmbus_chanpkt_hdr hdr; struct pci_message message_type; } __packed; struct pci_response { struct vmbus_chanpkt_hdr hdr; int32_t status; /* negative values are failures */ } __packed; struct pci_packet { void (*completion_func)(void *context, struct pci_response *resp, int resp_packet_size); void *compl_ctxt; struct pci_message message[0]; }; /* * Specific message types supporting the PCI protocol. 
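 *
 * (Worked example of win_slot_encoding above, assuming a little-endian
 * target: slot 2, function 1 packs as val == 0x22, i.e. the slot in
 * bits 0-4 and the function in bits 5-7.)
 *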
*/ struct pci_version_request { struct pci_message message_type; uint32_t protocol_version; uint32_t reservedz:31; } __packed; struct pci_bus_d0_entry { struct pci_message message_type; uint32_t reserved; uint64_t mmio_base; } __packed; struct pci_bus_relations { struct pci_incoming_message incoming; uint32_t device_count; struct pci_func_desc func[0]; } __packed; struct pci_bus_relations2 { struct pci_incoming_message incoming; uint32_t device_count; struct pci_func_desc2 func[0]; } __packed; #define MAX_NUM_BARS (PCIR_MAX_BAR_0 + 1) struct pci_q_res_req_response { struct vmbus_chanpkt_hdr hdr; int32_t status; /* negative values are failures */ uint32_t probed_bar[MAX_NUM_BARS]; } __packed; struct pci_resources_assigned { struct pci_message message_type; union win_slot_encoding wslot; uint8_t memory_range[0x14][MAX_NUM_BARS]; /* unused here */ uint32_t msi_descriptors; uint32_t reserved[4]; } __packed; struct pci_resources_assigned2 { struct pci_message message_type; union win_slot_encoding wslot; uint8_t memory_range[0x14][6]; /* not used here */ uint32_t msi_descriptor_count; uint8_t reserved[70]; } __packed; struct pci_create_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc int_desc; } __packed; struct pci_create_interrupt3 { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc3 int_desc; } __packed; struct pci_create_int_response { struct pci_response response; uint32_t reserved; struct tran_int_desc int_desc; } __packed; struct pci_delete_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct tran_int_desc int_desc; } __packed; struct pci_dev_incoming { struct pci_incoming_message incoming; union win_slot_encoding wslot; } __packed; struct pci_eject_response { struct pci_message message_type; union win_slot_encoding wslot; uint32_t status; } __packed; /* * Driver-specific state. */ enum hv_pcibus_state { hv_pcibus_init = 0, hv_pcibus_installed, }; struct hv_pcibus { device_t pcib; device_t pci_bus; struct vmbus_pcib_softc *sc; uint16_t pci_domain; enum hv_pcibus_state state; struct resource *cfg_res; struct completion query_completion, *query_comp; struct mtx config_lock; /* Avoid two threads writing index page */ struct mtx device_list_lock; /* Protect lists below */ uint32_t protocol_version; TAILQ_HEAD(, hv_pci_dev) children; TAILQ_HEAD(, hv_dr_state) dr_list; volatile int detaching; }; struct hv_pcidev_desc { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ uint32_t flags; uint16_t virtual_numa_node; } __packed; struct hv_pci_dev { TAILQ_ENTRY(hv_pci_dev) link; struct hv_pcidev_desc desc; bool reported_missing; struct hv_pcibus *hbus; struct task eject_task; TAILQ_HEAD(, hv_irq_desc) irq_desc_list; /* * What would be observed if one wrote 0xFFFFFFFF to a BAR and then * read it back, for each of the BAR offsets within config space. */ uint32_t probed_bar[MAX_NUM_BARS]; }; /* * Tracks "Device Relations" messages from the host, which must be both * processed in order and deferred, so that they are not handled in the * channel callback's context.
*/ struct hv_dr_work { struct task task; struct hv_pcibus *bus; }; struct hv_dr_state { TAILQ_ENTRY(hv_dr_state) link; uint32_t device_count; struct hv_pcidev_desc func[0]; }; struct hv_irq_desc { TAILQ_ENTRY(hv_irq_desc) link; struct tran_int_desc desc; int irq; }; #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) static uint32_t devfn_to_wslot(unsigned int devfn) { union win_slot_encoding wslot; wslot.val = 0; wslot.bits.slot = PCI_SLOT(devfn); wslot.bits.func = PCI_FUNC(devfn); return (wslot.val); } static unsigned int wslot_to_devfn(uint32_t wslot) { union win_slot_encoding encoding; unsigned int slot; unsigned int func; encoding.val = wslot; slot = encoding.bits.slot; func = encoding.bits.func; return (PCI_DEVFN(slot, func)); } struct vmbus_pcib_softc { struct vmbus_channel *chan; void *rx_buf; struct taskqueue *taskq; struct hv_pcibus *hbus; }; /* {44C4F61D-4444-4400-9D52-802E27EDE19F} */ static const struct hyperv_guid g_pass_through_dev_type = { .hv_guid = {0x1D, 0xF6, 0xC4, 0x44, 0x44, 0x44, 0x00, 0x44, 0x9D, 0x52, 0x80, 0x2E, 0x27, 0xED, 0xE1, 0x9F} }; struct hv_pci_compl { struct completion host_event; int32_t completion_status; }; struct q_res_req_compl { struct completion host_event; struct hv_pci_dev *hpdev; }; struct compose_comp_ctxt { struct hv_pci_compl comp_pkt; struct tran_int_desc int_desc; }; /* * It is possible the device is revoked during initialization. * Check if this happens during wait. * Return: 0 if response arrived, ENODEV if device revoked. */ static int wait_for_response(struct hv_pcibus *hbus, struct completion *c) { do { if (vmbus_chan_is_revoked(hbus->sc->chan)) { device_printf(hbus->pcib, "The device is revoked.\n"); return (ENODEV); } } while (wait_for_completion_timeout(c, hz /10) != 0); return 0; } static void hv_pci_generic_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct hv_pci_compl *comp_pkt = context; if (resp_packet_size >= sizeof(struct pci_response)) comp_pkt->completion_status = resp->status; else comp_pkt->completion_status = -1; complete(&comp_pkt->host_event); } static void q_resource_requirements(void *context, struct pci_response *resp, int resp_packet_size) { struct q_res_req_compl *completion = context; struct pci_q_res_req_response *q_res_req = (struct pci_q_res_req_response *)resp; int i; if (resp->status < 0) { printf("vmbus_pcib: failed to query resource requirements\n"); } else { for (i = 0; i < MAX_NUM_BARS; i++) completion->hpdev->probed_bar[i] = q_res_req->probed_bar[i]; } complete(&completion->host_event); } static void hv_pci_compose_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct compose_comp_ctxt *comp_pkt = context; struct pci_create_int_response *int_resp = (struct pci_create_int_response *)resp; comp_pkt->comp_pkt.completion_status = resp->status; comp_pkt->int_desc = int_resp->int_desc; complete(&comp_pkt->comp_pkt.host_event); } static void hv_int_desc_free(struct hv_pci_dev *hpdev, struct hv_irq_desc *hid) { struct pci_delete_interrupt *int_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE; int_pkt->wslot.val = hpdev->desc.wslot.val; int_pkt->int_desc = hid->desc; vmbus_chan_send(hpdev->hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, int_pkt, 
sizeof(*int_pkt), 0); free(hid, M_DEVBUF); } static void hv_pci_delete_device(struct hv_pci_dev *hpdev) { struct hv_pcibus *hbus = hpdev->hbus; struct hv_irq_desc *hid, *tmp_hid; device_t pci_dev; int devfn; devfn = wslot_to_devfn(hpdev->desc.wslot.val); bus_topo_lock(); pci_dev = pci_find_dbsf(hbus->pci_domain, 0, PCI_SLOT(devfn), PCI_FUNC(devfn)); if (pci_dev) device_delete_child(hbus->pci_bus, pci_dev); bus_topo_unlock(); mtx_lock(&hbus->device_list_lock); TAILQ_REMOVE(&hbus->children, hpdev, link); mtx_unlock(&hbus->device_list_lock); TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) hv_int_desc_free(hpdev, hid); free(hpdev, M_DEVBUF); } static struct hv_pci_dev * new_pcichild_device(struct hv_pcibus *hbus, struct hv_pcidev_desc *desc) { struct hv_pci_dev *hpdev; struct pci_child_message *res_req; struct q_res_req_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_child_message)]; } ctxt; int ret; hpdev = malloc(sizeof(*hpdev), M_DEVBUF, M_WAITOK | M_ZERO); hpdev->hbus = hbus; TAILQ_INIT(&hpdev->irq_desc_list); init_completion(&comp_pkt.host_event); comp_pkt.hpdev = hpdev; ctxt.pkt.compl_ctxt = &comp_pkt; ctxt.pkt.completion_func = q_resource_requirements; res_req = (struct pci_child_message *)&ctxt.pkt.message; res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; res_req->wslot.val = desc->wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, res_req, sizeof(*res_req), (uint64_t)(uintptr_t)&ctxt.pkt); if (ret) goto err; if (wait_for_response(hbus, &comp_pkt.host_event)) goto err; free_completion(&comp_pkt.host_event); hpdev->desc = *desc; mtx_lock(&hbus->device_list_lock); if (TAILQ_EMPTY(&hbus->children)) hbus->pci_domain = desc->ser & 0xFFFF; TAILQ_INSERT_TAIL(&hbus->children, hpdev, link); mtx_unlock(&hbus->device_list_lock); return (hpdev); err: free_completion(&comp_pkt.host_event); free(hpdev, M_DEVBUF); return (NULL); } static int pci_rescan(device_t dev) { return (BUS_RESCAN(dev)); } static void pci_devices_present_work(void *arg, int pending __unused) { struct hv_dr_work *dr_wrk = arg; struct hv_dr_state *dr = NULL; struct hv_pcibus *hbus; uint32_t child_no; bool found; struct hv_pcidev_desc *new_desc; struct hv_pci_dev *hpdev, *tmp_hpdev; struct completion *query_comp; bool need_rescan = false; hbus = dr_wrk->bus; free(dr_wrk, M_DEVBUF); /* Pull this off the queue and process it if it was the last one. */ mtx_lock(&hbus->device_list_lock); while (!TAILQ_EMPTY(&hbus->dr_list)) { dr = TAILQ_FIRST(&hbus->dr_list); TAILQ_REMOVE(&hbus->dr_list, dr, link); /* Throw this away if the list still has stuff in it. */ if (!TAILQ_EMPTY(&hbus->dr_list)) { free(dr, M_DEVBUF); continue; } } mtx_unlock(&hbus->device_list_lock); if (!dr) return; /* First, mark all existing children as reported missing. */ mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) hpdev->reported_missing = true; mtx_unlock(&hbus->device_list_lock); /* Next, add back any reported devices. 
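* Children still flagged reported_missing afterwards were dropped by the * host and are deleted; descriptors with no matching child are added and * trigger a rescan of the bus.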
*/ for (child_no = 0; child_no < dr->device_count; child_no++) { found = false; new_desc = &dr->func[child_no]; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { if ((hpdev->desc.wslot.val == new_desc->wslot.val) && (hpdev->desc.v_id == new_desc->v_id) && (hpdev->desc.d_id == new_desc->d_id) && (hpdev->desc.ser == new_desc->ser)) { hpdev->reported_missing = false; found = true; break; } } mtx_unlock(&hbus->device_list_lock); if (!found) { if (!need_rescan) need_rescan = true; hpdev = new_pcichild_device(hbus, new_desc); if (!hpdev) printf("vmbus_pcib: failed to add a child\n"); } } /* Remove missing device(s), if any */ TAILQ_FOREACH_SAFE(hpdev, &hbus->children, link, tmp_hpdev) { if (hpdev->reported_missing) hv_pci_delete_device(hpdev); } /* Rescan the bus to find any new device, if necessary. */ if (hbus->state == hv_pcibus_installed && need_rescan) pci_rescan(hbus->pci_bus); /* Wake up hv_pci_query_relations(), if it's waiting. */ query_comp = hbus->query_comp; if (query_comp) { hbus->query_comp = NULL; complete(query_comp); } free(dr, M_DEVBUF); } static struct hv_pci_dev * get_pcichild_wslot(struct hv_pcibus *hbus, uint32_t wslot) { struct hv_pci_dev *hpdev, *ret = NULL; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { if (hpdev->desc.wslot.val == wslot) { ret = hpdev; break; } } mtx_unlock(&hbus->device_list_lock); return (ret); } static void hv_pci_devices_present(struct hv_pcibus *hbus, struct pci_bus_relations *relations) { struct hv_dr_state *dr; struct hv_dr_work *dr_wrk; unsigned long dr_size; if (hbus->detaching && relations->device_count > 0) return; dr_size = offsetof(struct hv_dr_state, func) + (sizeof(struct pci_func_desc) * relations->device_count); dr = malloc(dr_size, M_DEVBUF, M_WAITOK | M_ZERO); dr->device_count = relations->device_count; if (dr->device_count != 0) memcpy(dr->func, relations->func, sizeof(struct hv_pcidev_desc) * dr->device_count); mtx_lock(&hbus->device_list_lock); TAILQ_INSERT_TAIL(&hbus->dr_list, dr, link); mtx_unlock(&hbus->device_list_lock); dr_wrk = malloc(sizeof(*dr_wrk), M_DEVBUF, M_WAITOK | M_ZERO); dr_wrk->bus = hbus; TASK_INIT(&dr_wrk->task, 0, pci_devices_present_work, dr_wrk); taskqueue_enqueue(hbus->sc->taskq, &dr_wrk->task); } static void hv_pci_devices_present2(struct hv_pcibus *hbus, struct pci_bus_relations2 *relations) { struct hv_dr_state *dr; struct hv_dr_work *dr_wrk; unsigned long dr_size; if (hbus->detaching && relations->device_count > 0) return; dr_size = offsetof(struct hv_dr_state, func) + (sizeof(struct pci_func_desc2) * relations->device_count); dr = malloc(dr_size, M_DEVBUF, M_WAITOK | M_ZERO); dr->device_count = relations->device_count; if (dr->device_count != 0) memcpy(dr->func, relations->func, sizeof(struct pci_func_desc2) * dr->device_count); mtx_lock(&hbus->device_list_lock); TAILQ_INSERT_TAIL(&hbus->dr_list, dr, link); mtx_unlock(&hbus->device_list_lock); dr_wrk = malloc(sizeof(*dr_wrk), M_DEVBUF, M_WAITOK | M_ZERO); dr_wrk->bus = hbus; TASK_INIT(&dr_wrk->task, 0, pci_devices_present_work, dr_wrk); taskqueue_enqueue(hbus->sc->taskq, &dr_wrk->task); } static void hv_eject_device_work(void *arg, int pending __unused) { struct hv_pci_dev *hpdev = arg; union win_slot_encoding wslot = hpdev->desc.wslot; struct hv_pcibus *hbus = hpdev->hbus; struct pci_eject_response *eject_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_eject_response)]; } ctxt; hv_pci_delete_device(hpdev); memset(&ctxt, 0, sizeof(ctxt)); eject_pkt = (struct 
pci_eject_response *)&ctxt.pkt.message; eject_pkt->message_type.type = PCI_EJECTION_COMPLETE; eject_pkt->wslot.val = wslot.val; vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, eject_pkt, sizeof(*eject_pkt), 0); } static void hv_pci_eject_device(struct hv_pci_dev *hpdev) { struct hv_pcibus *hbus = hpdev->hbus; struct taskqueue *taskq; if (hbus->detaching) return; /* * Push this task into the same taskqueue on which * vmbus_pcib_attach() runs, so we're sure this task can't run * concurrently with vmbus_pcib_attach(). */ TASK_INIT(&hpdev->eject_task, 0, hv_eject_device_work, hpdev); taskq = vmbus_chan_mgmt_tq(hbus->sc->chan); taskqueue_enqueue(taskq, &hpdev->eject_task); } #define PCIB_PACKET_SIZE 0x100 static void vmbus_pcib_on_channel_callback(struct vmbus_channel *chan, void *arg) { struct vmbus_pcib_softc *sc = arg; struct hv_pcibus *hbus = sc->hbus; void *buffer; int bufferlen = PCIB_PACKET_SIZE; struct pci_packet *comp_packet; struct pci_response *response; struct pci_incoming_message *new_msg; struct pci_bus_relations *bus_rel; struct pci_bus_relations2 *bus_rel2; struct pci_dev_incoming *dev_msg; struct hv_pci_dev *hpdev; buffer = sc->rx_buf; do { struct vmbus_chanpkt_hdr *pkt = buffer; uint32_t bytes_rxed; int ret; bytes_rxed = bufferlen; ret = vmbus_chan_recv_pkt(chan, pkt, &bytes_rxed); if (ret == ENOBUFS) { /* Handle large packet */ if (bufferlen > PCIB_PACKET_SIZE) { free(buffer, M_DEVBUF); buffer = NULL; } /* allocate a new buffer */ buffer = malloc(bytes_rxed, M_DEVBUF, M_WAITOK | M_ZERO); bufferlen = bytes_rxed; continue; } if (ret != 0) { /* ignore EIO or EAGAIN */ break; } if (bytes_rxed <= sizeof(struct pci_response)) continue; switch (pkt->cph_type) { case VMBUS_CHANPKT_TYPE_COMP: comp_packet = (struct pci_packet *)(uintptr_t)pkt->cph_xactid; response = (struct pci_response *)pkt; comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_rxed); break; case VMBUS_CHANPKT_TYPE_INBAND: new_msg = (struct pci_incoming_message *)buffer; switch (new_msg->message_type.type) { case PCI_BUS_RELATIONS: bus_rel = (struct pci_bus_relations *)buffer; if (bus_rel->device_count == 0) break; if (bytes_rxed < offsetof(struct pci_bus_relations, func) + (sizeof(struct pci_func_desc) * (bus_rel->device_count))) break; hv_pci_devices_present(hbus, bus_rel); break; case PCI_BUS_RELATIONS2: bus_rel2 = (struct pci_bus_relations2 *)buffer; if (bus_rel2->device_count == 0) break; if (bytes_rxed < offsetof(struct pci_bus_relations2, func) + (sizeof(struct pci_func_desc2) * (bus_rel2->device_count))) break; hv_pci_devices_present2(hbus, bus_rel2); break; case PCI_EJECT: dev_msg = (struct pci_dev_incoming *)buffer; hpdev = get_pcichild_wslot(hbus, dev_msg->wslot.val); if (hpdev) hv_pci_eject_device(hpdev); break; default: printf("vmbus_pcib: Unknown msg type 0x%x\n", new_msg->message_type.type); break; } break; default: printf("vmbus_pcib: Unknown VMBus msg type %hd\n", pkt->cph_type); break; } } while (1); if (bufferlen > PCIB_PACKET_SIZE) free(buffer, M_DEVBUF); } static int hv_pci_protocol_negotiation(struct hv_pcibus *hbus, enum pci_protocol_version_t version[], int num_version) { struct pci_version_request *version_req; struct hv_pci_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_version_request)]; } ctxt; int ret; int i; init_completion(&comp_pkt.host_event); ctxt.pkt.completion_func = hv_pci_generic_compl; ctxt.pkt.compl_ctxt = &comp_pkt; version_req = (struct pci_version_request *)&ctxt.pkt.message; version_req->message_type.type =
PCI_QUERY_PROTOCOL_VERSION; for (i = 0; i < num_version; i++) { version_req->protocol_version = version[i]; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, version_req, sizeof(*version_req), (uint64_t)(uintptr_t)&ctxt.pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); if (ret) { device_printf(hbus->pcib, "vmbus_pcib failed to request version: %d\n", ret); goto out; } if (comp_pkt.completion_status >= 0) { hbus->protocol_version = version[i]; device_printf(hbus->pcib, "PCI VMBus using version 0x%x\n", hbus->protocol_version); ret = 0; goto out; } if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { device_printf(hbus->pcib, "vmbus_pcib version negotiation failed: %x\n", comp_pkt.completion_status); ret = EPROTO; goto out; } reinit_completion(&comp_pkt.host_event); } device_printf(hbus->pcib, "PCI pass-through VSP failed to find supported version\n"); ret = EPROTO; out: free_completion(&comp_pkt.host_event); return (ret); } /* Ask the host to send along the list of child devices */ static int hv_pci_query_relations(struct hv_pcibus *hbus) { struct pci_message message; int ret; message.type = PCI_QUERY_BUS_RELATIONS; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &message, sizeof(message), 0); return (ret); } static int hv_pci_enter_d0(struct hv_pcibus *hbus) { struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_bus_d0_entry)]; } ctxt; int ret; /* * Tell the host that the bus is ready to use and has moved into the * powered-on state. This includes telling the host which region * of memory-mapped I/O space has been chosen for configuration space * access. */ init_completion(&comp_pkt.host_event); ctxt.pkt.completion_func = hv_pci_generic_compl; ctxt.pkt.compl_ctxt = &comp_pkt; d0_entry = (struct pci_bus_d0_entry *)&ctxt.pkt.message; memset(d0_entry, 0, sizeof(*d0_entry)); d0_entry->message_type.type = PCI_BUS_D0ENTRY; d0_entry->mmio_base = rman_get_start(hbus->cfg_res); ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, d0_entry, sizeof(*d0_entry), (uint64_t)(uintptr_t)&ctxt.pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); if (ret) goto out; if (comp_pkt.completion_status < 0) { device_printf(hbus->pcib, "vmbus_pcib failed to enable D0\n"); ret = EPROTO; } else { ret = 0; } out: free_completion(&comp_pkt.host_event); return (ret); } /* * It looks like this is only needed by Windows VMs, but let's send the * message anyway just to keep the host happy. */ static int hv_send_resources_allocated(struct hv_pcibus *hbus) { struct pci_resources_assigned *res_assigned; struct pci_resources_assigned2 *res_assigned2; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev; struct pci_packet *pkt; uint32_t wslot; int ret = 0; size_t size_res; size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4) ?
sizeof(*res_assigned) : sizeof(*res_assigned2); pkt = malloc(sizeof(*pkt) + size_res, M_DEVBUF, M_WAITOK | M_ZERO); for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; init_completion(&comp_pkt.host_event); memset(pkt, 0, sizeof(*pkt) + size_res); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4) { res_assigned = (struct pci_resources_assigned *)&pkt->message; res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; res_assigned->wslot.val = hpdev->desc.wslot.val; } else { res_assigned2 = (struct pci_resources_assigned2 *)&pkt->message; res_assigned2->message_type.type = PCI_RESOURCES_ASSIGNED2; res_assigned2->wslot.val = hpdev->desc.wslot.val; } ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, &pkt->message, size_res, (uint64_t)(uintptr_t)pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); free_completion(&comp_pkt.host_event); if (ret) break; if (comp_pkt.completion_status < 0) { ret = EPROTO; device_printf(hbus->pcib, "failed to send PCI_RESOURCES_ASSIGNED\n"); break; } } free(pkt, M_DEVBUF); return (ret); } static int hv_send_resources_released(struct hv_pcibus *hbus) { struct pci_child_message pkt; struct hv_pci_dev *hpdev; uint32_t wslot; int ret; for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; pkt.message_type.type = PCI_RESOURCES_RELEASED; pkt.wslot.val = hpdev->desc.wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &pkt, sizeof(pkt), 0); if (ret) return (ret); } return (0); } #define hv_cfg_read(x, s) \ static inline uint##x##_t hv_cfg_read_##s(struct hv_pcibus *bus, \ bus_size_t offset) \ { \ return (bus_read_##s(bus->cfg_res, offset)); \ } #define hv_cfg_write(x, s) \ static inline void hv_cfg_write_##s(struct hv_pcibus *bus, \ bus_size_t offset, uint##x##_t val) \ { \ return (bus_write_##s(bus->cfg_res, offset, val)); \ } hv_cfg_read(8, 1) hv_cfg_read(16, 2) hv_cfg_read(32, 4) hv_cfg_write(8, 1) hv_cfg_write(16, 2) hv_cfg_write(32, 4) static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, int size, uint32_t *val) { struct hv_pcibus *hbus = hpdev->hbus; bus_size_t addr = CFG_PAGE_OFFSET + where; /* * If the attempt is to read the IDs or the ROM BAR, simulate that. */ if (where + size <= PCIR_COMMAND) { memcpy(val, ((uint8_t *)&hpdev->desc.v_id) + where, size); } else if (where >= PCIR_REVID && where + size <= PCIR_CACHELNSZ) { memcpy(val, ((uint8_t *)&hpdev->desc.rev) + where - PCIR_REVID, size); } else if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_BIOS) { memcpy(val, (uint8_t *)&hpdev->desc.subsystem_id + where - PCIR_SUBVEND_0, size); } else if (where >= PCIR_BIOS && where + size <= PCIR_CAP_PTR) { /* ROM BARs are unimplemented */ *val = 0; } else if ((where >= PCIR_INTLINE && where + size <= PCIR_INTPIN) ||(where == PCIR_INTPIN && size == 1)) { /* * Interrupt Line and Interrupt PIN are hard-wired to zero * because this front-end only supports message-signaled * interrupts. */ *val = 0; } else if (where + size <= CFG_PAGE_SIZE) { mtx_lock(&hbus->config_lock); /* Choose the function to be read. */ hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val); /* Make sure the function was chosen before we start reading.*/ mb(); /* Read from that function's config space. 
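* The 4KB data window at CFG_PAGE_OFFSET is shared by all functions: the * register at offset 0 selects the target function, so config_lock must be * held across the index write and the data access.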
*/ switch (size) { case 1: *((uint8_t *)val) = hv_cfg_read_1(hbus, addr); break; case 2: *((uint16_t *)val) = hv_cfg_read_2(hbus, addr); break; default: *((uint32_t *)val) = hv_cfg_read_4(hbus, addr); break; } /* * Make sure the read was done before we release the lock, * allowing consecutive reads/writes. */ mb(); mtx_unlock(&hbus->config_lock); } else { /* Invalid config read: it's unlikely to reach here. */ memset(val, 0, size); } } static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, int size, uint32_t val) { struct hv_pcibus *hbus = hpdev->hbus; bus_size_t addr = CFG_PAGE_OFFSET + where; /* SSIDs and ROM BARs are read-only */ if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_CAP_PTR) return; if (where >= PCIR_COMMAND && where + size <= CFG_PAGE_SIZE) { mtx_lock(&hbus->config_lock); /* Choose the function to be written. */ hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val); /* Make sure the function was chosen before we start writing. */ wmb(); /* Write to that function's config space. */ switch (size) { case 1: hv_cfg_write_1(hbus, addr, (uint8_t)val); break; case 2: hv_cfg_write_2(hbus, addr, (uint16_t)val); break; default: hv_cfg_write_4(hbus, addr, (uint32_t)val); break; } /* * Make sure the write was done before we release the lock, * allowing consecutive reads/writes. */ mb(); mtx_unlock(&hbus->config_lock); } else { /* Invalid config write: it's unlikely to reach here. */ return; } } /* * The vPCI in some Hyper-V releases does not initialize the low 4 * bits of BAR registers. This could result in weird problems causing the * PCI code to fail to configure BARs correctly. * * Just write all 1's to those BARs whose probed values are not zero. * This seems to make the Hyper-V vPCI and pci_write_bar() cooperate * correctly. */ static void vmbus_pcib_prepopulate_bars(struct hv_pcibus *hbus) { struct hv_pci_dev *hpdev; int i; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { for (i = 0; i < 6; i++) { /* Ignore an empty BAR */ if (hpdev->probed_bar[i] == 0) continue; uint32_t bar_val = 0; _hv_pcifront_read_config(hpdev, PCIR_BAR(i), 4, &bar_val); if (hpdev->probed_bar[i] != bar_val) { if (bootverbose) printf("vmbus_pcib: initialize bar %d " "by writing all 1s\n", i); _hv_pcifront_write_config(hpdev, PCIR_BAR(i), 4, 0xffffffff); /* Now write the original value back */ _hv_pcifront_write_config(hpdev, PCIR_BAR(i), 4, bar_val); } } } mtx_unlock(&hbus->device_list_lock); } static void vmbus_pcib_set_detaching(void *arg, int pending __unused) { struct hv_pcibus *hbus = arg; atomic_set_int(&hbus->detaching, 1); } static void vmbus_pcib_pre_detach(struct hv_pcibus *hbus) { struct task task; TASK_INIT(&task, 0, vmbus_pcib_set_detaching, hbus); /* * Make sure the channel callback won't push any possible new * PCI_BUS_RELATIONS and PCI_EJECT tasks to sc->taskq. */ vmbus_chan_run_task(hbus->sc->chan, &task); taskqueue_drain_all(hbus->sc->taskq); } /* * Standard probe entry point. * */ static int vmbus_pcib_probe(device_t dev) { if (VMBUS_PROBE_GUID(device_get_parent(dev), dev, &g_pass_through_dev_type) == 0) { device_set_desc(dev, "Hyper-V PCI Express Pass Through"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Standard attach entry point.
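* The attach path walks the whole vPCI handshake: open the VMBus channel, * negotiate a protocol version, query bus relations, move the bus to D0, * report resource assignments, and only then attach the child "pci" bus.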
* */ static int vmbus_pcib_attach(device_t dev) { const int pci_ring_size = (4 * PAGE_SIZE); const struct hyperv_guid *inst_guid; struct vmbus_channel *channel; struct vmbus_pcib_softc *sc; struct hv_pcibus *hbus; int rid = 0; int ret; hbus = malloc(sizeof(*hbus), M_DEVBUF, M_WAITOK | M_ZERO); hbus->pcib = dev; channel = vmbus_get_channel(dev); inst_guid = vmbus_chan_guid_inst(channel); hbus->pci_domain = inst_guid->hv_guid[9] | (inst_guid->hv_guid[8] << 8); mtx_init(&hbus->config_lock, "hbcfg", NULL, MTX_DEF); mtx_init(&hbus->device_list_lock, "hbdl", NULL, MTX_DEF); TAILQ_INIT(&hbus->children); TAILQ_INIT(&hbus->dr_list); hbus->cfg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, RM_MAX_END, PCI_CONFIG_MMIO_LENGTH, RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE)); if (!hbus->cfg_res) { device_printf(dev, "failed to get resource for cfg window\n"); ret = ENXIO; goto free_bus; } sc = device_get_softc(dev); sc->chan = channel; sc->rx_buf = malloc(PCIB_PACKET_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); sc->hbus = hbus; /* * The taskq is used to handle PCI_BUS_RELATIONS and PCI_EJECT * messages. NB: we can't handle the messages in the channel callback * directly, because the message handlers need to send new messages * to the host and wait for the host's completion messages, which * must also be handled by the channel callback. */ sc->taskq = taskqueue_create("vmbus_pcib_tq", M_WAITOK, taskqueue_thread_enqueue, &sc->taskq); taskqueue_start_threads(&sc->taskq, 1, PI_NET, "vmbus_pcib_tq"); hbus->sc = sc; init_completion(&hbus->query_completion); hbus->query_comp = &hbus->query_completion; ret = vmbus_chan_open(sc->chan, pci_ring_size, pci_ring_size, NULL, 0, vmbus_pcib_on_channel_callback, sc); if (ret) goto free_res; ret = hv_pci_protocol_negotiation(hbus, pci_protocol_versions, ARRAY_SIZE(pci_protocol_versions)); if (ret) goto vmbus_close; ret = hv_pci_query_relations(hbus); if (!ret) ret = wait_for_response(hbus, hbus->query_comp); if (ret) goto vmbus_close; ret = hv_pci_enter_d0(hbus); if (ret) goto vmbus_close; ret = hv_send_resources_allocated(hbus); if (ret) goto vmbus_close; vmbus_pcib_prepopulate_bars(hbus); hbus->pci_bus = device_add_child(dev, "pci", -1); if (!hbus->pci_bus) { device_printf(dev, "failed to create pci bus\n"); ret = ENXIO; goto vmbus_close; } bus_generic_attach(dev); hbus->state = hv_pcibus_installed; return (0); vmbus_close: vmbus_pcib_pre_detach(hbus); vmbus_chan_close(sc->chan); free_res: taskqueue_free(sc->taskq); free_completion(&hbus->query_completion); free(sc->rx_buf, M_DEVBUF); bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res); free_bus: mtx_destroy(&hbus->device_list_lock); mtx_destroy(&hbus->config_lock); free(hbus, M_DEVBUF); return (ret); } /* * Standard detach entry point */ static int vmbus_pcib_detach(device_t dev) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pcibus *hbus = sc->hbus; struct pci_message teardown_packet; struct pci_bus_relations relations; int ret; vmbus_pcib_pre_detach(hbus); if (hbus->state == hv_pcibus_installed) bus_generic_detach(dev); /* Delete any children which might still exist.
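* An all-zero relations message (device_count == 0) causes * pci_devices_present_work() to mark every child as missing and delete it.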
*/ memset(&relations, 0, sizeof(relations)); hv_pci_devices_present(hbus, &relations); ret = hv_send_resources_released(hbus); if (ret) device_printf(dev, "failed to send PCI_RESOURCES_RELEASED\n"); teardown_packet.type = PCI_BUS_D0EXIT; ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &teardown_packet, sizeof(struct pci_message), 0); if (ret) device_printf(dev, "failed to send PCI_BUS_D0EXIT\n"); taskqueue_drain_all(hbus->sc->taskq); vmbus_chan_close(sc->chan); taskqueue_free(sc->taskq); free_completion(&hbus->query_completion); free(sc->rx_buf, M_DEVBUF); bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res); mtx_destroy(&hbus->device_list_lock); mtx_destroy(&hbus->config_lock); free(hbus, M_DEVBUF); return (0); } static int vmbus_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *val) { struct vmbus_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *val = sc->hbus->pci_domain; return (0); case PCIB_IVAR_BUS: /* There is only bus 0. */ *val = 0; return (0); } return (ENOENT); } static int vmbus_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t val) { return (ENOENT); } static struct resource * vmbus_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { unsigned int bar_no; struct hv_pci_dev *hpdev; struct vmbus_pcib_softc *sc = device_get_softc(dev); struct resource *res; unsigned int devfn; if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(sc->hbus->pci_domain, child, rid, start, end, count, flags)); /* Devices with port I/O BAR are not supported. */ if (type == SYS_RES_IOPORT) return (NULL); if (type == SYS_RES_MEMORY) { devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (NULL); bar_no = PCI_RID2BAR(*rid); if (bar_no >= MAX_NUM_BARS) return (NULL); /* Make sure a 32-bit BAR gets a 32-bit address */ if (!(hpdev->probed_bar[bar_no] & PCIM_BAR_MEM_64)) end = ulmin(end, 0xFFFFFFFF); } res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); /* * If this is a request for a specific range, assume it is * correct and pass it up to the parent. 
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); if (res == NULL) device_printf(dev, "vmbus_pcib_alloc_resource failed\n"); return (res); } static int -vmbus_pcib_adjust_resource(device_t dev, device_t child, int type, +vmbus_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct vmbus_pcib_softc *sc = device_get_softc(dev); - if (type == PCI_RES_BUS) + if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_adjust_bus(sc->hbus->pci_domain, child, r, start, end)); - return (bus_generic_adjust_resource(dev, child, type, r, start, end)); + return (bus_generic_adjust_resource(dev, child, r, start, end)); } static int vmbus_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_release_bus(sc->hbus->pci_domain, child, rid, r)); if (type == SYS_RES_IOPORT) return (EINVAL); return (bus_generic_release_resource(dev, child, type, rid, r)); } static int vmbus_pcib_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_activate_bus(sc->hbus->pci_domain, child, rid, r)); return (bus_generic_activate_resource(dev, child, type, rid, r)); } static int vmbus_pcib_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_deactivate_bus(sc->hbus->pci_domain, child, rid, r)); return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } static int vmbus_pcib_get_cpus(device_t pcib, device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { return (bus_get_cpus(pcib, op, setsize, cpuset)); } static uint32_t vmbus_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pci_dev *hpdev; unsigned int devfn = PCI_DEVFN(slot, func); uint32_t data = 0; KASSERT(bus == 0, ("bus should be 0, but is %u", bus)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (~0); _hv_pcifront_read_config(hpdev, reg, bytes, &data); return (data); } static void vmbus_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pci_dev *hpdev; unsigned int devfn = PCI_DEVFN(slot, func); KASSERT(bus == 0, ("bus should be 0, but is %u", bus)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return; _hv_pcifront_write_config(hpdev, reg, bytes, data); } static int vmbus_pcib_route_intr(device_t pcib, device_t dev, int pin) { /* We only support MSI/MSI-X and don't support INTx interrupt. 
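* Legacy INTx routing therefore always fails; interrupts are instead * composed through the host via the PCI_CREATE_INTERRUPT_MESSAGE* requests * in vmbus_pcib_map_msi() below.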
*/ return (PCI_INVALID_IRQ); } static int vmbus_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { #if defined(__amd64__) || defined(__i386__) return (PCIB_ALLOC_MSI(device_get_parent(pcib), dev, count, maxcount, irqs)); #endif #if defined(__aarch64__) return (intr_alloc_msi(pcib, dev, ACPI_MSI_XREF, count, maxcount, irqs)); #endif } static int vmbus_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { #if defined(__amd64__) || defined(__i386__) return (PCIB_RELEASE_MSI(device_get_parent(pcib), dev, count, irqs)); #endif #if defined(__aarch64__) return (intr_release_msi(pcib, dev, ACPI_MSI_XREF, count, irqs)); #endif } static int vmbus_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { #if defined(__aarch64__) int ret; #if defined(INTRNG) ret = intr_alloc_msix(pcib, dev, ACPI_MSI_XREF, irq); return (ret); #else return (ENXIO); #endif #else return (PCIB_ALLOC_MSIX(device_get_parent(pcib), dev, irq)); #endif /* __aarch64__ */ } static int vmbus_pcib_release_msix(device_t pcib, device_t dev, int irq) { #if defined(__aarch64__) return (intr_release_msix(pcib, dev, ACPI_MSI_XREF, irq)); #else return (PCIB_RELEASE_MSIX(device_get_parent(pcib), dev, irq)); #endif /* __aarch64__ */ } #if defined(__aarch64__) #define MSI_INTEL_ADDR_DEST 0x00000000 #define MSI_INTEL_DATA_DELFIXED 0x0 #endif #if defined(__amd64__) || defined(__i386__) #define MSI_INTEL_ADDR_DEST 0x000ff000 #define MSI_INTEL_DATA_INTVEC IOART_INTVEC /* Interrupt vector. */ #define MSI_INTEL_DATA_DELFIXED IOART_DELFIXED #endif static int vmbus_pcib_map_msi(device_t pcib, device_t child, int irq, uint64_t *addr, uint32_t *data) { unsigned int devfn; struct hv_pci_dev *hpdev; uint64_t v_addr; uint32_t v_data; struct hv_irq_desc *hid, *tmp_hid; unsigned int cpu, vcpu_id; unsigned int vector; struct vmbus_pcib_softc *sc = device_get_softc(pcib); struct compose_comp_ctxt comp; struct { struct pci_packet pkt; union { struct pci_create_interrupt v1; struct pci_create_interrupt3 v3; } int_pkts; } ctxt; int ret; uint32_t size; devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (ENOENT); #if defined(__aarch64__) ret = intr_map_msi(pcib, child, ACPI_MSI_XREF, irq, &v_addr, &v_data); #else ret = PCIB_MAP_MSI(device_get_parent(pcib), child, irq, &v_addr, &v_data); #endif if (ret) return (ret); TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) { if (hid->irq == irq) { TAILQ_REMOVE(&hpdev->irq_desc_list, hid, link); hv_int_desc_free(hpdev, hid); break; } } #if defined(__aarch64__) cpu = 0; vcpu_id = VMBUS_GET_VCPU_ID(device_get_parent(pcib), pcib, cpu); vector = v_data; #else cpu = apic_cpuid((v_addr & MSI_INTEL_ADDR_DEST) >> 12); vcpu_id = VMBUS_GET_VCPU_ID(device_get_parent(pcib), pcib, cpu); vector = v_data & MSI_INTEL_DATA_INTVEC; #endif if (hpdev->hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4 && vcpu_id > 63) { /* We only support vcpu_id < 64 before vPCI version 1.4 */ device_printf(pcib, "Error: " "vcpu_id %u overflowed on PCI VMBus version 0x%x\n", vcpu_id, hpdev->hbus->protocol_version); return (ENODEV); } init_completion(&comp.comp_pkt.host_event); memset(&ctxt, 0, sizeof(ctxt)); ctxt.pkt.completion_func = hv_pci_compose_compl; ctxt.pkt.compl_ctxt = &comp; switch (hpdev->hbus->protocol_version) { case PCI_PROTOCOL_VERSION_1_1: ctxt.int_pkts.v1.message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; ctxt.int_pkts.v1.wslot.val = hpdev->desc.wslot.val; ctxt.int_pkts.v1.int_desc.vector = vector;
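/* * Protocol v1 addresses the interrupt with a 64-bit vCPU mask (cpu_mask * below), which is why vcpu_id was range-checked against 64 above. */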
ctxt.int_pkts.v1.int_desc.vector_count = 1; ctxt.int_pkts.v1.int_desc.delivery_mode = MSI_INTEL_DATA_DELFIXED; ctxt.int_pkts.v1.int_desc.cpu_mask = 1ULL << vcpu_id; size = sizeof(ctxt.int_pkts.v1); break; case PCI_PROTOCOL_VERSION_1_4: ctxt.int_pkts.v3.message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3; ctxt.int_pkts.v3.wslot.val = hpdev->desc.wslot.val; ctxt.int_pkts.v3.int_desc.vector = vector; ctxt.int_pkts.v3.int_desc.vector_count = 1; ctxt.int_pkts.v3.int_desc.reserved = 0; ctxt.int_pkts.v3.int_desc.delivery_mode = MSI_INTEL_DATA_DELFIXED; ctxt.int_pkts.v3.int_desc.processor_count = 1; ctxt.int_pkts.v3.int_desc.processor_array[0] = vcpu_id; size = sizeof(ctxt.int_pkts.v3); break; } ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, &ctxt.int_pkts, size, (uint64_t)(uintptr_t)&ctxt.pkt); if (ret) { free_completion(&comp.comp_pkt.host_event); return (ret); } wait_for_completion(&comp.comp_pkt.host_event); free_completion(&comp.comp_pkt.host_event); if (comp.comp_pkt.completion_status < 0) { device_printf(pcib, "vmbus_pcib_map_msi completion_status %d\n", comp.comp_pkt.completion_status); return (EPROTO); } *addr = comp.int_desc.address; *data = comp.int_desc.data; hid = malloc(sizeof(struct hv_irq_desc), M_DEVBUF, M_WAITOK | M_ZERO); hid->irq = irq; hid->desc = comp.int_desc; TAILQ_INSERT_TAIL(&hpdev->irq_desc_list, hid, link); return (0); } static device_method_t vmbus_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vmbus_pcib_probe), DEVMETHOD(device_attach, vmbus_pcib_attach), DEVMETHOD(device_detach, vmbus_pcib_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, vmbus_pcib_read_ivar), DEVMETHOD(bus_write_ivar, vmbus_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, vmbus_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, vmbus_pcib_adjust_resource), DEVMETHOD(bus_release_resource, vmbus_pcib_release_resource), DEVMETHOD(bus_activate_resource, vmbus_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, vmbus_pcib_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, vmbus_pcib_get_cpus), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, vmbus_pcib_read_config), DEVMETHOD(pcib_write_config, vmbus_pcib_write_config), DEVMETHOD(pcib_route_interrupt, vmbus_pcib_route_intr), DEVMETHOD(pcib_alloc_msi, vmbus_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, vmbus_pcib_release_msi), DEVMETHOD(pcib_alloc_msix, vmbus_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, vmbus_pcib_release_msix), DEVMETHOD(pcib_map_msi, vmbus_pcib_map_msi), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, vmbus_pcib_driver, vmbus_pcib_methods, sizeof(struct vmbus_pcib_softc)); DRIVER_MODULE(vmbus_pcib, vmbus, vmbus_pcib_driver, 0, 0); MODULE_DEPEND(vmbus_pcib, vmbus, 1, 1, 1); MODULE_DEPEND(vmbus_pcib, pci, 1, 1, 1); #endif /* NEW_PCIB */ diff --git a/sys/dev/ofw/ofw_pcib.c b/sys/dev/ofw/ofw_pcib.c index f8d0fc8e7029..e95a5f029140 100644 --- a/sys/dev/ofw/ofw_pcib.c +++ b/sys/dev/ofw/ofw_pcib.c @@ -1,760 +1,760 @@ /*- * Copyright (c) 2011 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" /* * If it is necessary to set another value of this for some platforms, * it should be set in the fdt.h file. */ #ifndef PCI_MAP_INTR #define PCI_MAP_INTR 4 #endif #define PCI_INTR_PINS 4 /* * bus interface. */ static struct rman *ofw_pcib_get_rman(device_t, int, u_int); static struct resource * ofw_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int ofw_pcib_release_resource(device_t, device_t, int, int, struct resource *); static int ofw_pcib_activate_resource(device_t, device_t, int, int, struct resource *); static int ofw_pcib_deactivate_resource(device_t, device_t, int, int, struct resource *); -static int ofw_pcib_adjust_resource(device_t, device_t, int, +static int ofw_pcib_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int ofw_pcib_map_resource(device_t, device_t, int, struct resource *, struct resource_map_request *, struct resource_map *); static int ofw_pcib_unmap_resource(device_t, device_t, int, struct resource *, struct resource_map *); static int ofw_pcib_translate_resource(device_t bus, int type, rman_res_t start, rman_res_t *newstart); #ifdef __powerpc__ static bus_space_tag_t ofw_pcib_bus_get_bus_tag(device_t, device_t); #endif /* * pcib interface */ static int ofw_pcib_maxslots(device_t); /* * ofw_bus interface */ static phandle_t ofw_pcib_get_node(device_t, device_t); /* * local methods */ static int ofw_pcib_fill_ranges(phandle_t, struct ofw_pci_range *); /* * Driver methods.
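* (generic bus, pcib and ofw_bus method tables shared by OFW-based PCI * bridge drivers).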
*/ static device_method_t ofw_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_attach, ofw_pcib_attach), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, ofw_pcib_read_ivar), DEVMETHOD(bus_write_ivar, ofw_pcib_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, ofw_pcib_get_rman), DEVMETHOD(bus_alloc_resource, ofw_pcib_alloc_resource), DEVMETHOD(bus_release_resource, ofw_pcib_release_resource), DEVMETHOD(bus_activate_resource, ofw_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, ofw_pcib_deactivate_resource), DEVMETHOD(bus_adjust_resource, ofw_pcib_adjust_resource), DEVMETHOD(bus_map_resource, ofw_pcib_map_resource), DEVMETHOD(bus_unmap_resource, ofw_pcib_unmap_resource), DEVMETHOD(bus_translate_resource, ofw_pcib_translate_resource), #ifdef __powerpc__ DEVMETHOD(bus_get_bus_tag, ofw_pcib_bus_get_bus_tag), #endif /* pcib interface */ DEVMETHOD(pcib_maxslots, ofw_pcib_maxslots), DEVMETHOD(pcib_route_interrupt, ofw_pcib_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, ofw_pcib_get_node), DEVMETHOD_END }; DEFINE_CLASS_0(ofw_pcib, ofw_pcib_driver, ofw_pcib_methods, 0); int ofw_pcib_init(device_t dev) { struct ofw_pci_softc *sc; phandle_t node; u_int32_t busrange[2]; struct ofw_pci_range *rp; int i, error; struct ofw_pci_cell_info *cell_info; node = ofw_bus_get_node(dev); sc = device_get_softc(dev); sc->sc_initialized = 1; sc->sc_range = NULL; sc->sc_pci_domain = device_get_unit(dev); cell_info = (struct ofw_pci_cell_info *)malloc(sizeof(*cell_info), M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_cell_info = cell_info; if (OF_getencprop(node, "bus-range", busrange, sizeof(busrange)) != 8) busrange[0] = 0; sc->sc_dev = dev; sc->sc_node = node; sc->sc_bus = busrange[0]; if (sc->sc_quirks & OFW_PCI_QUIRK_RANGES_ON_CHILDREN) { phandle_t c; int n, i; sc->sc_nrange = 0; for (c = OF_child(node); c != 0; c = OF_peer(c)) { n = ofw_pcib_nranges(c, cell_info); if (n > 0) sc->sc_nrange += n; } if (sc->sc_nrange == 0) { error = ENXIO; goto out; } sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), M_DEVBUF, M_WAITOK); i = 0; for (c = OF_child(node); c != 0; c = OF_peer(c)) { n = ofw_pcib_fill_ranges(c, &sc->sc_range[i]); if (n > 0) i += n; } KASSERT(i == sc->sc_nrange, ("range count mismatch")); } else { sc->sc_nrange = ofw_pcib_nranges(node, cell_info); if (sc->sc_nrange <= 0) { device_printf(dev, "could not get ranges\n"); error = ENXIO; goto out; } sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), M_DEVBUF, M_WAITOK); ofw_pcib_fill_ranges(node, sc->sc_range); } sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "PCI I/O Ports"; error = rman_init(&sc->sc_io_rman); if (error != 0) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto out; } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "PCI Non Prefetchable Memory"; error = rman_init(&sc->sc_mem_rman); if (error != 0) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto out_mem_rman; } sc->sc_pmem_rman.rm_type = RMAN_ARRAY; sc->sc_pmem_rman.rm_descr = "PCI Prefetchable Memory"; error = rman_init(&sc->sc_pmem_rman); if (error != 0) { device_printf(dev, "rman_init() failed.
error = %d\n", error); goto out_pmem_rman; } for (i = 0; i < sc->sc_nrange; i++) { error = 0; rp = sc->sc_range + i; if (sc->sc_range_mask & ((uint64_t)1 << i)) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_CONFIG: break; case OFW_PCI_PHYS_HI_SPACE_IO: error = rman_manage_region(&sc->sc_io_rman, rp->pci, rp->pci + rp->size - 1); break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: if (rp->pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) { sc->sc_have_pmem = 1; error = rman_manage_region(&sc->sc_pmem_rman, rp->pci, rp->pci + rp->size - 1); } else { error = rman_manage_region(&sc->sc_mem_rman, rp->pci, rp->pci + rp->size - 1); } break; } if (error != 0) { device_printf(dev, "rman_manage_region(%x, %#jx, %#jx) failed. " "error = %d\n", rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK, rp->pci, rp->pci + rp->size - 1, error); goto out_full; } } ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(cell_t)); return (0); out_full: rman_fini(&sc->sc_pmem_rman); out_pmem_rman: rman_fini(&sc->sc_mem_rman); out_mem_rman: rman_fini(&sc->sc_io_rman); out: free(sc->sc_cell_info, M_DEVBUF); free(sc->sc_range, M_DEVBUF); return (error); } void ofw_pcib_fini(device_t dev) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); free(sc->sc_cell_info, M_DEVBUF); free(sc->sc_range, M_DEVBUF); rman_fini(&sc->sc_io_rman); rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_pmem_rman); } int ofw_pcib_attach(device_t dev) { struct ofw_pci_softc *sc; int error; sc = device_get_softc(dev); if (!sc->sc_initialized) { error = ofw_pcib_init(dev); if (error != 0) return (error); } device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ofw_pcib_maxslots(device_t dev) { return (PCI_SLOTMAX); } int ofw_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct ofw_pci_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[PCI_MAP_INTR]; int intrcells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(&reg, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, &reg, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (intrcells != 0) { pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); return (pintr); } /* * Maybe it's a real interrupt, not an intpin */ if (pin > PCI_INTR_PINS) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } int ofw_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->sc_pci_domain; return (0); case PCIB_IVAR_BUS: *result = sc->sc_bus; return (0); default: break; } return (ENOENT); } int ofw_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_bus = value; return (0); default: break; } return (ENOENT); } int ofw_pcib_nranges(phandle_t node, struct ofw_pci_cell_info *info) { ssize_t nbase_ranges; if (info == NULL) return (-1); info->host_address_cells = 1; info->size_cells = 2; info->pci_address_cell = 3; OF_getencprop(OF_parent(node), "#address-cells",
&(info->host_address_cells), sizeof(info->host_address_cells)); OF_getencprop(node, "#address-cells", &(info->pci_address_cell), sizeof(info->pci_address_cell)); OF_getencprop(node, "#size-cells", &(info->size_cells), sizeof(info->size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges <= 0) return (-1); return (nbase_ranges / sizeof(cell_t) / (info->pci_address_cell + info->host_address_cells + info->size_cells)); } static struct resource * ofw_pcib_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->sc_pci_domain, child, rid, start, end, count, flags)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); default: return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } } static int ofw_pcib_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_release_bus(sc->sc_pci_domain, child, rid, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_release_resource(bus, child, type, rid, res)); default: return (bus_generic_release_resource(bus, child, type, rid, res)); } } static int ofw_pcib_translate_resource(device_t bus, int type, rman_res_t start, rman_res_t *newstart) { struct ofw_pci_softc *sc; struct ofw_pci_range *rp; int space; sc = device_get_softc(bus); /* * Map this through the ranges list */ for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && rp->pci_hi != 0; rp++) { if (start < rp->pci || start >= rp->pci + rp->size) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_IO: space = SYS_RES_IOPORT; break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: space = SYS_RES_MEMORY; break; default: space = -1; } if (type == space) { start += (rp->host - rp->pci); *newstart = start; return (0); } } return (ENOENT); } static int ofw_pcib_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_activate_bus(sc->sc_pci_domain, child, rid, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_activate_resource(bus, child, type, rid, res)); default: return (bus_generic_activate_resource(bus, child, type, rid, res)); } } static int ofw_pcib_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct ofw_pci_softc *sc; struct ofw_pci_range *rp; rman_res_t length, start; int error, space; /* Resources must be active to be mapped. 
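* The PCI bus address is translated to a host CPU address through the * firmware-provided ranges list before it is handed to bus_space_map().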
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); /* * Map this through the ranges list */ sc = device_get_softc(dev); for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && rp->pci_hi != 0; rp++) { if (start < rp->pci || start >= rp->pci + rp->size) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_IO: space = SYS_RES_IOPORT; break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: space = SYS_RES_MEMORY; break; default: space = -1; } if (type == space) { start += (rp->host - rp->pci); break; } } if (bootverbose) printf("ofw_pci mapdev: start %jx, len %jd\n", start, length); map->r_bustag = BUS_GET_BUS_TAG(child, child); if (map->r_bustag == NULL) return (ENOMEM); error = bus_space_map(map->r_bustag, start, length, 0, &map->r_bushandle); if (error != 0) return (error); /* XXX for powerpc only? */ map->r_vaddr = (void *)map->r_bushandle; map->r_size = length; return (0); } static int ofw_pcib_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: bus_space_unmap(map->r_bustag, map->r_bushandle, map->r_size); return (0); default: return (EINVAL); } } #ifdef __powerpc__ static bus_space_tag_t ofw_pcib_bus_get_bus_tag(device_t bus, device_t child) { return (&bs_le_tag); } #endif static int ofw_pcib_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_deactivate_bus(sc->sc_pci_domain, child, rid, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_deactivate_resource(bus, child, type, rid, res)); default: return (bus_generic_deactivate_resource(bus, child, type, rid, res)); } } static int -ofw_pcib_adjust_resource(device_t bus, device_t child, int type, +ofw_pcib_adjust_resource(device_t bus, device_t child, struct resource *res, rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif - switch (type) { + switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->sc_pci_domain, child, res, start, end)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_adjust_resource(bus, child, type, res, + return (bus_generic_rman_adjust_resource(bus, child, res, start, end)); default: - return (bus_generic_adjust_resource(bus, child, type, res, - start, end)); + return (bus_generic_adjust_resource(bus, child, res, start, + end)); } } static phandle_t ofw_pcib_get_node(device_t bus, device_t dev) { struct ofw_pci_softc *sc; sc = device_get_softc(bus); /* We only have one child, the PCI bus, which needs our own node. 
*/ return (sc->sc_node); } static int ofw_pcib_fill_ranges(phandle_t node, struct ofw_pci_range *ranges) { int host_address_cells = 1, pci_address_cells = 3, size_cells = 2; cell_t *base_ranges; ssize_t nbase_ranges; int nranges; int i, j, k; OF_getencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); OF_getencprop(node, "#address-cells", &pci_address_cells, sizeof(pci_address_cells)); OF_getencprop(node, "#size-cells", &size_cells, sizeof(size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges <= 0) return (-1); nranges = nbase_ranges / sizeof(cell_t) / (pci_address_cells + host_address_cells + size_cells); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < nranges; i++) { ranges[i].pci_hi = base_ranges[j++]; ranges[i].pci = 0; for (k = 0; k < pci_address_cells - 1; k++) { ranges[i].pci <<= 32; ranges[i].pci |= base_ranges[j++]; } ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { ranges[i].host <<= 32; ranges[i].host |= base_ranges[j++]; } ranges[i].size = 0; for (k = 0; k < size_cells; k++) { ranges[i].size <<= 32; ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (nranges); } static struct rman * ofw_pcib_get_rman(device_t bus, int type, u_int flags) { struct ofw_pci_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_IOPORT: return (&sc->sc_io_rman); case SYS_RES_MEMORY: if (sc->sc_have_pmem && (flags & RF_PREFETCHABLE)) return (&sc->sc_pmem_rman); else return (&sc->sc_mem_rman); default: break; } return (NULL); } diff --git a/sys/dev/pccbb/pccbb_pci.c b/sys/dev/pccbb/pccbb_pci.c index 1a07ff8a4e98..08014fb210ed 100644 --- a/sys/dev/pccbb/pccbb_pci.c +++ b/sys/dev/pccbb/pccbb_pci.c @@ -1,977 +1,977 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000-2001 Jonathan Chen All rights reserved. * Copyright (c) 2002-2004 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /*- * Copyright (c) 1998, 1999 and 2000 * HAYAKAWA Koichi. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by HAYAKAWA Koichi. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Driver for PCI to CardBus Bridge chips * * References: * TI Datasheets: * http://www-s.ti.com/cgi-bin/sc/generic2.cgi?family=PCI+CARDBUS+CONTROLLERS * * Written by Jonathan Chen * The author would like to acknowledge: * * HAYAKAWA Koichi: Author of the NetBSD code for the same thing * * Warner Losh: Newbus/newcard guru and author of the pccard side of things * * YAMAMOTO Shigeru: Author of another FreeBSD cardbus driver * * David Cross: Author of the initial ugly hack for a specific cardbus card */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #include "pcib_if.h" #define DPRINTF(x) do { if (cbb_debug) printf x; } while (0) #define DEVPRINTF(x) do { if (cbb_debug) device_printf x; } while (0) #define PCI_MASK_CONFIG(DEV,REG,MASK,SIZE) \ pci_write_config(DEV, REG, pci_read_config(DEV, REG, SIZE) MASK, SIZE) #define PCI_MASK2_CONFIG(DEV,REG,MASK1,MASK2,SIZE) \ pci_write_config(DEV, REG, ( \ pci_read_config(DEV, REG, SIZE) MASK1) MASK2, SIZE) static void cbb_chipinit(struct cbb_softc *sc); static int cbb_pci_filt(void *arg); static struct yenta_chipinfo { uint32_t yc_id; const char *yc_name; int yc_chiptype; } yc_chipsets[] = { /* Texas Instruments chips */ {PCIC_ID_TI1031, "TI1031 PCI-PC Card Bridge", CB_TI113X}, {PCIC_ID_TI1130, "TI1130 PCI-CardBus Bridge", CB_TI113X}, {PCIC_ID_TI1131, "TI1131 PCI-CardBus Bridge", CB_TI113X}, {PCIC_ID_TI1210, "TI1210 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1211, "TI1211 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1220, "TI1220 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1221, "TI1221 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1225, "TI1225 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1250, "TI1250 PCI-CardBus Bridge", CB_TI125X}, {PCIC_ID_TI1251, "TI1251 PCI-CardBus Bridge", CB_TI125X}, {PCIC_ID_TI1251B,"TI1251B PCI-CardBus Bridge",CB_TI125X}, {PCIC_ID_TI1260, "TI1260 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1260B,"TI1260B PCI-CardBus Bridge",CB_TI12XX}, 
{PCIC_ID_TI1410, "TI1410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1420, "TI1420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1421, "TI1421 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1450, "TI1450 PCI-CardBus Bridge", CB_TI125X}, /*SIC!*/ {PCIC_ID_TI1451, "TI1451 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1510, "TI1510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1520, "TI1520 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4410, "TI4410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4450, "TI4450 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4451, "TI4451 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4510, "TI4510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6411, "TI6411 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6420, "TI6420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6420SC, "TI6420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7410, "TI7410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7510, "TI7510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610M, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610SD, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610MS, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, /* ENE */ {PCIC_ID_ENE_CB710, "ENE CB710 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB720, "ENE CB720 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1211, "ENE CB1211 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1225, "ENE CB1225 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1410, "ENE CB1410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1420, "ENE CB1420 PCI-CardBus Bridge", CB_TI12XX}, /* Ricoh chips */ {PCIC_ID_RICOH_RL5C465, "RF5C465 PCI-CardBus Bridge", CB_RF5C46X}, {PCIC_ID_RICOH_RL5C466, "RF5C466 PCI-CardBus Bridge", CB_RF5C46X}, {PCIC_ID_RICOH_RL5C475, "RF5C475 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C476, "RF5C476 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C477, "RF5C477 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C478, "RF5C478 PCI-CardBus Bridge", CB_RF5C47X}, /* Toshiba products */ {PCIC_ID_TOPIC95, "ToPIC95 PCI-CardBus Bridge", CB_TOPIC95}, {PCIC_ID_TOPIC95B, "ToPIC95B PCI-CardBus Bridge", CB_TOPIC95}, {PCIC_ID_TOPIC97, "ToPIC97 PCI-CardBus Bridge", CB_TOPIC97}, {PCIC_ID_TOPIC100, "ToPIC100 PCI-CardBus Bridge", CB_TOPIC97}, /* Cirrus Logic */ {PCIC_ID_CLPD6832, "CLPD6832 PCI-CardBus Bridge", CB_CIRRUS}, {PCIC_ID_CLPD6833, "CLPD6833 PCI-CardBus Bridge", CB_CIRRUS}, {PCIC_ID_CLPD6834, "CLPD6834 PCI-CardBus Bridge", CB_CIRRUS}, /* 02Micro */ {PCIC_ID_OZ6832, "O2Micro OZ6832/6833 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6860, "O2Micro OZ6836/6860 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6872, "O2Micro OZ6812/6872 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6912, "O2Micro OZ6912/6972 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6922, "O2Micro OZ6922 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6933, "O2Micro OZ6933 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711E1, "O2Micro OZ711E1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711EC1, "O2Micro OZ711EC1/M1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711E2, "O2Micro OZ711E2 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M1, "O2Micro OZ711M1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M2, "O2Micro OZ711M2 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M3, "O2Micro OZ711M3 PCI-CardBus Bridge", CB_O2MICRO}, /* SMC */ {PCIC_ID_SMC_34C90, "SMC 34C90 PCI-CardBus Bridge", CB_CIRRUS}, /* sentinel */ {0 /* null id */, "unknown", CB_UNKNOWN}, }; /************************************************************************/ /* Probe/Attach 
*/ /************************************************************************/ static int cbb_chipset(uint32_t pci_id, const char **namep) { struct yenta_chipinfo *ycp; for (ycp = yc_chipsets; ycp->yc_id != 0 && pci_id != ycp->yc_id; ++ycp) continue; if (namep != NULL) *namep = ycp->yc_name; return (ycp->yc_chiptype); } static int cbb_pci_probe(device_t brdev) { const char *name; uint32_t progif; uint32_t baseclass; uint32_t subclass; /* * Do we know that we support the chipset? If so, then we * accept the device. */ if (cbb_chipset(pci_get_devid(brdev), &name) != CB_UNKNOWN) { device_set_desc(brdev, name); return (BUS_PROBE_DEFAULT); } /* * We do support generic CardBus bridges. All that we've seen * to date have progif 0 (the Yenta spec, and successors mandate * this). */ baseclass = pci_get_class(brdev); subclass = pci_get_subclass(brdev); progif = pci_get_progif(brdev); if (baseclass == PCIC_BRIDGE && subclass == PCIS_BRIDGE_CARDBUS && progif == 0) { device_set_desc(brdev, "PCI-CardBus Bridge"); return (BUS_PROBE_GENERIC); } return (ENXIO); } /* * Print out the config space */ static void cbb_print_config(device_t dev) { int i; device_printf(dev, "PCI Configuration space:"); for (i = 0; i < 256; i += 4) { if (i % 16 == 0) printf("\n 0x%02x: ", i); printf("0x%08x ", pci_read_config(dev, i, 4)); } printf("\n"); } static int cbb_pci_attach(device_t brdev) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) static int curr_bus_number = 2; /* XXX EVILE BAD (see below) */ uint32_t pribus; #endif struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int rid; device_t parent; parent = device_get_parent(brdev); mtx_init(&sc->mtx, device_get_nameunit(brdev), "cbb", MTX_DEF); sc->chipset = cbb_chipset(pci_get_devid(brdev), NULL); sc->dev = brdev; sc->cbdev = NULL; sc->domain = pci_get_domain(brdev); sc->pribus = pcib_get_bus(parent); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1); pcib_setup_secbus(brdev, &sc->bus, 1); #else sc->bus.sec = pci_read_config(brdev, PCIR_SECBUS_2, 1); sc->bus.sub = pci_read_config(brdev, PCIR_SUBBUS_2, 1); #endif SLIST_INIT(&sc->rl); rid = CBBR_SOCKBASE; sc->base_res = bus_alloc_resource_any(brdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->base_res) { device_printf(brdev, "Could not map register memory\n"); mtx_destroy(&sc->mtx); return (ENOMEM); } else { DEVPRINTF((brdev, "Found memory at %jx\n", rman_get_start(sc->base_res))); } /* attach children */ sc->cbdev = device_add_child(brdev, "cardbus", -1); if (sc->cbdev == NULL) DEVPRINTF((brdev, "WARNING: cannot add cardbus bus.\n")); else if (device_probe_and_attach(sc->cbdev) != 0) DEVPRINTF((brdev, "WARNING: cannot attach cardbus bus!\n")); sc->bst = rman_get_bustag(sc->base_res); sc->bsh = rman_get_bushandle(sc->base_res); exca_init(&sc->exca, brdev, sc->bst, sc->bsh, CBB_EXCA_OFFSET); sc->exca.flags |= EXCA_HAS_MEMREG_WIN; sc->exca.chipset = EXCA_CARDBUS; sc->chipinit = cbb_chipinit; sc->chipinit(sc); /*Sysctls*/ sctx = device_get_sysctl_ctx(brdev); soid = device_get_sysctl_tree(brdev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", 
CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); #if 0 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "memory", CTLFLAG_RD, &sc->subbus, 0, "Memory window open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "premem", CTLFLAG_RD, &sc->subbus, 0, "Prefetch memory window open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io1", CTLFLAG_RD, &sc->subbus, 0, "io range 1 open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io2", CTLFLAG_RD, &sc->subbus, 0, "io range 2 open"); #endif #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* * This is a gross hack. We should be scanning the entire pci * tree, assigning bus numbers in a way such that we (1) can * reserve 1 extra bus just in case and (2) all sub buses * are in an appropriate range. */ DEVPRINTF((brdev, "Secondary bus is %d\n", sc->bus.sec)); pribus = pci_read_config(brdev, PCIR_PRIBUS_2, 1); if (sc->bus.sec == 0 || sc->pribus != pribus) { if (curr_bus_number <= sc->pribus) curr_bus_number = sc->pribus + 1; if (pribus != sc->pribus) { DEVPRINTF((brdev, "Setting primary bus to %d\n", sc->pribus)); pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1); } sc->bus.sec = curr_bus_number++; sc->bus.sub = curr_bus_number++; DEVPRINTF((brdev, "Secondary bus set to %d subbus %d\n", sc->bus.sec, sc->bus.sub)); pci_write_config(brdev, PCIR_SECBUS_2, sc->bus.sec, 1); pci_write_config(brdev, PCIR_SUBBUS_2, sc->bus.sub, 1); } #endif /* Map and establish the interrupt. */ rid = 0; sc->irq_res = bus_alloc_resource_any(brdev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(brdev, "Unable to map IRQ...\n"); goto err; } if (bus_setup_intr(brdev, sc->irq_res, INTR_TYPE_AV | INTR_MPSAFE, cbb_pci_filt, NULL, sc, &sc->intrhand)) { device_printf(brdev, "couldn't establish interrupt\n"); goto err; } /* reset 16-bit pcmcia bus */ exca_clrb(&sc->exca, EXCA_INTR, EXCA_INTR_RESET); /* turn off power */ cbb_power(brdev, CARD_OFF); /* CSC Interrupt: Card detect interrupt on */ cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD); /* reset interrupt */ cbb_set(sc, CBB_SOCKET_EVENT, cbb_get(sc, CBB_SOCKET_EVENT)); if (bootverbose) cbb_print_config(brdev); /* Start the thread */ if (kproc_create(cbb_event_thread, sc, &sc->event_thread, 0, 0, "%s event thread", device_get_nameunit(brdev))) { device_printf(brdev, "unable to create event thread.\n"); panic("cbb_create_event_thread"); } sc->sc_root_token = root_mount_hold(device_get_nameunit(sc->dev)); return (0); err: if (sc->irq_res) bus_release_resource(brdev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->base_res) { bus_release_resource(brdev, SYS_RES_MEMORY, CBBR_SOCKBASE, sc->base_res); } mtx_destroy(&sc->mtx); return (ENOMEM); } static int cbb_pci_detach(device_t brdev) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct cbb_softc *sc = device_get_softc(brdev); #endif int error; error = cbb_detach(brdev); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) if (error == 0) pcib_free_secbus(brdev, &sc->bus); #endif return (error); } static void cbb_chipinit(struct cbb_softc *sc) { uint32_t mux, sysctrl, reg; /* Set CardBus latency timer */ if (pci_read_config(sc->dev, PCIR_SECLAT_2, 1) < 0x20) pci_write_config(sc->dev, PCIR_SECLAT_2, 0x20, 1); /* Set PCI latency timer */ if (pci_read_config(sc->dev, PCIR_LATTIMER, 1) < 0x20) pci_write_config(sc->dev, PCIR_LATTIMER, 0x20, 1); /* Enable DMA, memory access for this card and I/O access for children */ pci_enable_busmaster(sc->dev); pci_enable_io(sc->dev, SYS_RES_IOPORT); pci_enable_io(sc->dev, 
SYS_RES_MEMORY); /* disable Legacy IO */ switch (sc->chipset) { case CB_RF5C46X: PCI_MASK_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~(CBBM_BRIDGECTRL_RL_3E0_EN | CBBM_BRIDGECTRL_RL_3E2_EN), 2); break; default: pci_write_config(sc->dev, CBBR_LEGACY, 0x0, 4); break; } /* Use PCI interrupt for interrupt routing */ PCI_MASK2_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~(CBBM_BRIDGECTRL_MASTER_ABORT | CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN), | CBBM_BRIDGECTRL_WRITE_POST_EN, 2); /* * XXX this should be a function table, ala OLDCARD. This means * that we could more easily support ISA interrupts for pccard * cards if we had to. */ switch (sc->chipset) { case CB_TI113X: /* * The TI 1031, TI 1130 and TI 1131 all require another bit * be set to enable PCI routing of interrupts, and then * a bit for each of the CSC and Function interrupts we * want routed. */ PCI_MASK_CONFIG(sc->dev, CBBR_CBCTRL, | CBBM_CBCTRL_113X_PCI_INTR | CBBM_CBCTRL_113X_PCI_CSC | CBBM_CBCTRL_113X_PCI_IRQ_EN, 1); PCI_MASK_CONFIG(sc->dev, CBBR_DEVCTRL, & ~(CBBM_DEVCTRL_INT_SERIAL | CBBM_DEVCTRL_INT_PCI), 1); break; case CB_TI12XX: /* * Some TI 12xx (and [14][45]xx) based pci cards * sometimes have issues with the MFUNC register not * being initialized due to a bad EEPROM on board. * Laptops on which this matters have this register * properly initialized. * * The TI125X parts have a different register. * * Note: Only the lower two nibbles matter. When set * to 0, the MFUNC{0,1} pins are GPIO, which isn't * going to work out too well because we specifically * program these parts for parallel interrupt signalling * elsewhere. We preserve the upper bits of this * register since changing them has subtle side effects * for different variants of the card and they are * extremely difficult to exhaustively test. * * Also, the TI 1510/1520 changed the default for the MFUNC * register from 0x0 to 0x1000 to enable IRQSER by default. * We want to be careful to avoid overriding that, and the * below test will do that. Should this check prove to be * too permissive, we should just check against 0 and 0x1000 * and not touch it otherwise. */ mux = pci_read_config(sc->dev, CBBR_MFUNC, 4); sysctrl = pci_read_config(sc->dev, CBBR_SYSCTRL, 4); if ((mux & (CBBM_MFUNC_PIN0 | CBBM_MFUNC_PIN1)) == 0) { mux = (mux & ~CBBM_MFUNC_PIN0) | CBBM_MFUNC_PIN0_INTA; if ((sysctrl & CBBM_SYSCTRL_INTRTIE) == 0) mux = (mux & ~CBBM_MFUNC_PIN1) | CBBM_MFUNC_PIN1_INTB; pci_write_config(sc->dev, CBBR_MFUNC, mux, 4); } /*FALLTHROUGH*/ case CB_TI125X: /* * Disable zoom video. Some machines initialize this * improperly and experience has shown that this helps * prevent strange behavior. We don't support zoom * video anyway, so no harm can come from this. */ pci_write_config(sc->dev, CBBR_MMCTRL, 0, 4); break; case CB_O2MICRO: /* * Issue #1: INT# generated at the same time as * selected ISA IRQ. When IREQ# or STSCHG# is active, * in addition to the ISA IRQ being generated, INT# * will also be generated at the same time. * * Some of the older controllers have an issue in * which the slot's PCI INT# will be asserted whenever * IREQ# or STSCHG# is asserted even if ExCA registers * 03h or 05h have an ISA IRQ selected. * * The fix for this issue, which will work for any * controller (old or new), is to set ExCA registers * 3Ah (slot 0) & 7Ah (slot 1) bits 7:4 = 1010b. * These bits are undocumented. By setting this * register (of each slot) to '1010xxxxb' a routing of * IREQ# to INTC# and STSCHG# to INTC# is selected.
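 * (In register terms: the read-modify-write a few lines below keeps
 * the low nibble of EXCA_O2MICRO_CTRL_C and forces bits 7:4 to that
 * 1010b pattern by ORing in EXCA_O2CC_IREQ_INTC |
 * EXCA_O2CC_STSCHG_INTC.)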
* Since INTC# isn't connected, there will be no * unexpected PCI INT when IREQ# or STSCHG# is active. * However, INTA# (slot 0) or INTB# (slot 1) will * still be correctly generated if NO ISA IRQ is * selected (ExCA regs 03h or 05h are cleared). */ reg = exca_getb(&sc->exca, EXCA_O2MICRO_CTRL_C); reg = (reg & 0x0f) | EXCA_O2CC_IREQ_INTC | EXCA_O2CC_STSCHG_INTC; exca_putb(&sc->exca, EXCA_O2MICRO_CTRL_C, reg); break; case CB_TOPIC97: /* * Disable Zoom Video, ToPIC 97, 100. */ pci_write_config(sc->dev, TOPIC97_ZV_CONTROL, 0, 1); /* * ToPIC 97, 100 * At offset 0xa1: INTERRUPT CONTROL register * 0x1: Turn on INT interrupts. */ PCI_MASK_CONFIG(sc->dev, TOPIC_INTCTRL, | TOPIC97_INTCTRL_INTIRQSEL, 1); /* * ToPIC97, 100 * Need to assert support for low voltage cards */ exca_setb(&sc->exca, EXCA_TOPIC97_CTRL, EXCA_TOPIC97_CTRL_LV_MASK); goto topic_common; case CB_TOPIC95: /* * SOCKETCTRL appears to be TOPIC 95/B specific */ PCI_MASK_CONFIG(sc->dev, TOPIC95_SOCKETCTRL, | TOPIC95_SOCKETCTRL_SCR_IRQSEL, 4); topic_common:; /* * At offset 0xa0: SLOT CONTROL * 0x80 Enable CardBus Functionality * 0x40 Enable CardBus and PC Card registers * 0x20 Lock ID in exca regs * 0x10 Write protect ID in config regs * Clear the rest of the bits, which defaults the slot * in legacy mode to 0x3e0 and offset 0. (legacy * mode is determined elsewhere) */ pci_write_config(sc->dev, TOPIC_SLOTCTRL, TOPIC_SLOTCTRL_SLOTON | TOPIC_SLOTCTRL_SLOTEN | TOPIC_SLOTCTRL_ID_LOCK | TOPIC_SLOTCTRL_ID_WP, 1); /* * At offset 0xa3 Card Detect Control Register * 0x80 CARDBUS enable * 0x01 Cleared for hardware change detect */ PCI_MASK2_CONFIG(sc->dev, TOPIC_CDC, | TOPIC_CDC_CARDBUS, & ~TOPIC_CDC_SWDETECT, 4); break; } /* * Need to tell the ExCA registers to route CSC interrupts via PCI * interrupts. There are two ways to do this. One is to set * INTR_ENABLE and the other is to set CSC to 0. Since both * methods are mutually compatible, we do both. */ exca_putb(&sc->exca, EXCA_INTR, EXCA_INTR_ENABLE); exca_putb(&sc->exca, EXCA_CSC_INTR, 0); cbb_disable_func_intr(sc); /* close all memory and io windows */ pci_write_config(sc->dev, CBBR_MEMBASE0, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_MEMLIMIT0, 0, 4); pci_write_config(sc->dev, CBBR_MEMBASE1, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_MEMLIMIT1, 0, 4); pci_write_config(sc->dev, CBBR_IOBASE0, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_IOLIMIT0, 0, 4); pci_write_config(sc->dev, CBBR_IOBASE1, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_IOLIMIT1, 0, 4); } static int cbb_route_interrupt(device_t pcib, device_t dev, int pin) { struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(pcib); return (rman_get_start(sc->irq_res)); } static int cbb_pci_shutdown(device_t brdev) { struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); /* * We're about to pull the rug out from the card, so mark it as * gone to prevent harm. */ sc->cardok = 0; /* * Place the cards in reset, turn off the interrupts and power * down the socket. */ PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, |CBBM_BRIDGECTRL_RESET, 2); exca_clrb(&sc->exca, EXCA_INTR, EXCA_INTR_RESET); cbb_set(sc, CBB_SOCKET_MASK, 0); cbb_set(sc, CBB_SOCKET_EVENT, 0xffffffff); cbb_power(brdev, CARD_OFF); /* * For paranoia, turn off all address decoding.
Really not needed, * it seems, but it can't hurt */ exca_putb(&sc->exca, EXCA_ADDRWIN_ENABLE, 0); pci_write_config(brdev, CBBR_MEMBASE0, 0, 4); pci_write_config(brdev, CBBR_MEMLIMIT0, 0, 4); pci_write_config(brdev, CBBR_MEMBASE1, 0, 4); pci_write_config(brdev, CBBR_MEMLIMIT1, 0, 4); pci_write_config(brdev, CBBR_IOBASE0, 0, 4); pci_write_config(brdev, CBBR_IOLIMIT0, 0, 4); pci_write_config(brdev, CBBR_IOBASE1, 0, 4); pci_write_config(brdev, CBBR_IOLIMIT1, 0, 4); return (0); } static int cbb_pci_filt(void *arg) { struct cbb_softc *sc = arg; uint32_t sockevent; uint8_t csc; int retval = FILTER_STRAY; /* * Some chips also require us to read the old ExCA register for card * status change when we route CSC via PCI. This isn't supposed to be * required, but it clears the interrupt state on some chipsets. * Maybe there's a setting that would obviate its need. Maybe we * should test the status bits and deal with them, but so far we've * not found any machines that don't also give us the socket status * indication above. * * This call used to be unconditional. However, further research * suggests that we hit this condition when the card READY interrupt * fired. So now we only read it for 16-bit cards, and we only claim * the interrupt if READY is set. If this still causes problems, then * the next step would be to read this if we have a 16-bit card *OR* * we have no card. We treat the READY signal as if it were the power * completion signal. Some bridges may double signal things here, but * signalling twice should be OK since we only sleep on the powerintr * in one place and a double wakeup would be benign there. */ if (sc->flags & CBB_16BIT_CARD) { csc = exca_getb(&sc->exca, EXCA_CSC); if (csc & EXCA_CSC_READY) { atomic_add_int(&sc->powerintr, 1); wakeup((void *)&sc->powerintr); retval = FILTER_HANDLED; } } /* * Read the socket event. Sometimes, the theory goes, the PCI bus is * so loaded that it cannot satisfy the read request, so we get * garbage back from the following read. We have to filter out the * garbage so that we don't spontaneously reset the card under high * load. PCI isn't supposed to act like this. No doubt this is a bug * in the PCI bridge chipset (or cbb bridge) that's being used in * certain amd64 laptops today. Work around the issue by assuming * that, if any bits we don't know about are set, we got * garbage. */ sockevent = cbb_get(sc, CBB_SOCKET_EVENT); if (sockevent != 0 && (sockevent & ~CBB_SOCKET_EVENT_VALID_MASK) == 0) { /* * If anything has happened to the socket, we assume that the * card is no longer OK, and we shouldn't call its ISR. We * set cardok as soon as we've attached the card. This helps * in a noisy eject, which happens all too often when users * are ejecting their PC Cards. * * We use this method in preference to checking to see if the * card is still there because the check suffers from a race * condition in the bouncing case. */ #define DELTA (CBB_SOCKET_MASK_CD) if (sockevent & DELTA) { cbb_clrb(sc, CBB_SOCKET_MASK, DELTA); cbb_set(sc, CBB_SOCKET_EVENT, DELTA); sc->cardok = 0; cbb_disable_func_intr(sc); wakeup(&sc->intrhand); } #undef DELTA /* * Wake up anybody waiting for a power interrupt. We have to * use atomic_add_int for wakeups on other cores. */ if (sockevent & CBB_SOCKET_EVENT_POWER) { cbb_clrb(sc, CBB_SOCKET_MASK, CBB_SOCKET_EVENT_POWER); cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_POWER); atomic_add_int(&sc->powerintr, 1); wakeup((void *)&sc->powerintr); } /* * Status change interrupts aren't presently used in the * rest of the driver.
For now, just ACK them. */ if (sockevent & CBB_SOCKET_EVENT_CSTS) cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_CSTS); retval = FILTER_HANDLED; } return retval; } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static struct resource * cbb_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct cbb_softc *sc; sc = device_get_softc(bus); if (type == PCI_RES_BUS) return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, count, flags)); return (cbb_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int -cbb_pci_adjust_resource(device_t bus, device_t child, int type, +cbb_pci_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct cbb_softc *sc; sc = device_get_softc(bus); - if (type == PCI_RES_BUS) { + if (rman_get_type(r) == PCI_RES_BUS) { if (!rman_is_region_manager(r, &sc->bus.rman)) return (EINVAL); return (rman_adjust_resource(r, start, end)); } - return (bus_generic_adjust_resource(bus, child, type, r, start, end)); + return (bus_generic_adjust_resource(bus, child, r, start, end)); } static int cbb_pci_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { struct cbb_softc *sc; int error; sc = device_get_softc(bus); if (type == PCI_RES_BUS) { if (!rman_is_region_manager(r, &sc->bus.rman)) return (EINVAL); if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } return (cbb_release_resource(bus, child, type, rid, r)); } #endif /************************************************************************/ /* PCI compat methods */ /************************************************************************/ static int cbb_maxslots(device_t brdev) { return (0); } static uint32_t cbb_read_config(device_t brdev, u_int b, u_int s, u_int f, u_int reg, int width) { /* * Pass through to the next ppb up the chain (i.e. our grandparent). */ return (PCIB_READ_CONFIG(device_get_parent(device_get_parent(brdev)), b, s, f, reg, width)); } static void cbb_write_config(device_t brdev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { /* * Pass through to the next ppb up the chain (i.e. our grandparent). */ PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(brdev)), b, s, f, reg, val, width); } static int cbb_pci_suspend(device_t brdev) { int error = 0; struct cbb_softc *sc = device_get_softc(brdev); error = bus_generic_suspend(brdev); if (error != 0) return (error); cbb_set(sc, CBB_SOCKET_MASK, 0); /* Quiet hardware */ sc->cardok = 0; /* Card is bogus now */ return (0); } static int cbb_pci_resume(device_t brdev) { int error = 0; struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); uint32_t tmp; /* * In the APM and early ACPI era, BIOSes saved the PCI config * registers. As chips became more complicated, that functionality moved * into the ACPI code / tables. We must therefore restore the settings * we made here to make sure the device comes back. Transitions to Dx * from D0 and back to D0 cause the bridge to lose its config space, so * all the bus mappings and such must be restored here. * * The PCI layer handles standard PCI registers like the * command register and BARs, but cbb-specific registers are * handled here. */ sc->chipinit(sc); /* reset interrupt -- Do we really need to do this?
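 * (Most likely yes: the socket event register is write-one-to-clear,
 * so reading the pending bits and writing the same value back
 * acknowledges exactly the events that were latched, without losing
 * one that arrives between the read and the write.)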
*/ tmp = cbb_get(sc, CBB_SOCKET_EVENT); cbb_set(sc, CBB_SOCKET_EVENT, tmp); /* CSC Interrupt: Card detect interrupt on */ cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD); /* Signal the thread to wakeup. */ wakeup(&sc->intrhand); error = bus_generic_resume(brdev); return (error); } static device_method_t cbb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cbb_pci_probe), DEVMETHOD(device_attach, cbb_pci_attach), DEVMETHOD(device_detach, cbb_pci_detach), DEVMETHOD(device_shutdown, cbb_pci_shutdown), DEVMETHOD(device_suspend, cbb_pci_suspend), DEVMETHOD(device_resume, cbb_pci_resume), /* bus methods */ DEVMETHOD(bus_read_ivar, cbb_read_ivar), DEVMETHOD(bus_write_ivar, cbb_write_ivar), #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_alloc_resource, cbb_pci_alloc_resource), DEVMETHOD(bus_adjust_resource, cbb_pci_adjust_resource), DEVMETHOD(bus_release_resource, cbb_pci_release_resource), #else DEVMETHOD(bus_alloc_resource, cbb_alloc_resource), DEVMETHOD(bus_release_resource, cbb_release_resource), #endif DEVMETHOD(bus_activate_resource, cbb_activate_resource), DEVMETHOD(bus_deactivate_resource, cbb_deactivate_resource), DEVMETHOD(bus_driver_added, cbb_driver_added), DEVMETHOD(bus_child_detached, cbb_child_detached), DEVMETHOD(bus_setup_intr, cbb_setup_intr), DEVMETHOD(bus_teardown_intr, cbb_teardown_intr), DEVMETHOD(bus_child_present, cbb_child_present), /* 16-bit card interface */ DEVMETHOD(card_set_res_flags, cbb_pcic_set_res_flags), DEVMETHOD(card_set_memory_offset, cbb_pcic_set_memory_offset), /* power interface */ DEVMETHOD(power_enable_socket, cbb_power_enable_socket), DEVMETHOD(power_disable_socket, cbb_power_disable_socket), /* pcib compatibility interface */ DEVMETHOD(pcib_maxslots, cbb_maxslots), DEVMETHOD(pcib_read_config, cbb_read_config), DEVMETHOD(pcib_write_config, cbb_write_config), DEVMETHOD(pcib_route_interrupt, cbb_route_interrupt), DEVMETHOD_END }; static driver_t cbb_driver = { "cbb", cbb_methods, sizeof(struct cbb_softc) }; DRIVER_MODULE(cbb, pci, cbb_driver, 0, 0); MODULE_PNP_INFO("W32:vendor/device;D:#", pci, cbb, yc_chipsets, nitems(yc_chipsets) - 1); MODULE_DEPEND(cbb, exca, 1, 1, 1); diff --git a/sys/dev/pci/pci_host_generic.c b/sys/dev/pci/pci_host_generic.c index da49edcf91f5..f4fccc7b8277 100644 --- a/sys/dev/pci/pci_host_generic.c +++ b/sys/dev/pci/pci_host_generic.c @@ -1,747 +1,747 @@ /*- * Copyright (c) 2015, 2020 Ruslan Bukin * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Generic ECAM PCIe driver */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #if defined(VM_MEMATTR_DEVICE_NP) #define PCI_UNMAPPED #define PCI_RF_FLAGS RF_UNMAPPED #else #define PCI_RF_FLAGS 0 #endif /* Forward prototypes */ static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes); static int generic_pcie_maxslots(device_t dev); static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value); int pci_host_generic_core_attach(device_t dev) { #ifdef PCI_UNMAPPED struct resource_map_request req; struct resource_map map; #endif struct generic_pcie_core_softc *sc; uint64_t phys_base; uint64_t pci_base; uint64_t size; const char *range_descr; char buf[64]; int domain, error; int flags, rid, tuple, type; sc = device_get_softc(dev); sc->dev = dev; /* Create the parent DMA tag to pass down the coherent flag */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (error != 0) return (error); /* * Attempt to set the domain. If it's missing, or we are unable to * set it then memory allocations may be placed in the wrong domain. 
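 * (bus_get_domain() reports the NUMA domain the bridge is attached
 * to; handing it to bus_dma_tag_set_domain() steers memory allocated
 * through this tag toward that domain, so descriptor memory stays
 * local to the device.)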
*/ if (bus_get_domain(dev, &domain) == 0) (void)bus_dma_tag_set_domain(sc->dmat, domain); if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) { rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, PCI_RF_FLAGS | RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate memory.\n"); error = ENXIO; goto err_resource; } #ifdef PCI_UNMAPPED resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE_NP; error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req, &map); if (error != 0) { device_printf(dev, "could not map memory.\n"); return (error); } rman_set_mapping(sc->res, &map); #endif } sc->has_pmem = false; sc->pmem_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s prefetch window", device_get_nameunit(dev)); sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF); sc->mem_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev)); sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF); sc->io_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s I/O port window", device_get_nameunit(dev)); sc->io_rman.rm_descr = strdup(buf, M_DEVBUF); /* Initialize rman and allocate memory regions */ error = rman_init(&sc->pmem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto err_pmem_rman; } error = rman_init(&sc->mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto err_mem_rman; } error = rman_init(&sc->io_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto err_io_rman; } for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { phys_base = sc->ranges[tuple].phys_base; pci_base = sc->ranges[tuple].pci_base; size = sc->ranges[tuple].size; rid = tuple + 1; if (size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: sc->has_pmem = true; range_descr = "prefetch"; flags = RF_PREFETCHABLE; type = SYS_RES_MEMORY; error = rman_manage_region(&sc->pmem_rman, pci_base, pci_base + size - 1); break; case FLAG_TYPE_MEM: range_descr = "memory"; flags = 0; type = SYS_RES_MEMORY; error = rman_manage_region(&sc->mem_rman, pci_base, pci_base + size - 1); break; case FLAG_TYPE_IO: range_descr = "I/O port"; flags = 0; type = SYS_RES_IOPORT; error = rman_manage_region(&sc->io_rman, pci_base, pci_base + size - 1); break; default: continue; } if (error) { device_printf(dev, "rman_manage_region() failed." 
"error = %d\n", error); goto err_rman_manage; } error = bus_set_resource(dev, type, rid, phys_base, size); if (error != 0) { device_printf(dev, "failed to set resource for range %d: %d\n", tuple, error); goto err_rman_manage; } sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE | RF_UNMAPPED | flags); if (sc->ranges[tuple].res == NULL) { device_printf(dev, "failed to allocate resource for range %d\n", tuple); error = ENXIO; goto err_rman_manage; } if (bootverbose) device_printf(dev, "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n", pci_base, phys_base, size, range_descr); } return (0); err_rman_manage: for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { if (sc->ranges[tuple].size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: case FLAG_TYPE_MEM: type = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: type = SYS_RES_IOPORT; break; default: continue; } if (sc->ranges[tuple].res != NULL) bus_release_resource(dev, type, tuple + 1, sc->ranges[tuple].res); bus_delete_resource(dev, type, tuple + 1); } rman_fini(&sc->io_rman); err_io_rman: rman_fini(&sc->mem_rman); err_mem_rman: rman_fini(&sc->pmem_rman); err_pmem_rman: free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); if (sc->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); err_resource: bus_dma_tag_destroy(sc->dmat); return (error); } int pci_host_generic_core_detach(device_t dev) { struct generic_pcie_core_softc *sc; int error, tuple, type; sc = device_get_softc(dev); error = bus_generic_detach(dev); if (error != 0) return (error); for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { if (sc->ranges[tuple].size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: case FLAG_TYPE_MEM: type = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: type = SYS_RES_IOPORT; break; default: continue; } if (sc->ranges[tuple].res != NULL) bus_release_resource(dev, type, tuple + 1, sc->ranges[tuple].res); bus_delete_resource(dev, type, tuple + 1); } rman_fini(&sc->io_rman); rman_fini(&sc->mem_rman); rman_fini(&sc->pmem_rman); free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); if (sc->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); bus_dma_tag_destroy(sc->dmat); return (0); } static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct generic_pcie_core_softc *sc; uint64_t offset; uint32_t data; sc = device_get_softc(dev); if ((bus < sc->bus_start) || (bus > sc->bus_end)) return (~0U); if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return (~0U); if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0) return (~0U); offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); switch (bytes) { case 1: data = bus_read_1(sc->res, offset); break; case 2: data = le16toh(bus_read_2(sc->res, offset)); break; case 4: data = le32toh(bus_read_4(sc->res, offset)); break; default: return (~0U); } return (data); } static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct generic_pcie_core_softc *sc; uint64_t offset; sc = device_get_softc(dev); if ((bus < sc->bus_start) || (bus > 
sc->bus_end)) return; if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return; offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); switch (bytes) { case 1: bus_write_1(sc->res, offset, val); break; case 2: bus_write_2(sc->res, offset, htole16(val)); break; case 4: bus_write_4(sc->res, offset, htole32(val)); break; default: return; } } static int generic_pcie_maxslots(device_t dev) { return (31); /* max slots per bus acc. to standard */ } static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); if (index == PCIB_IVAR_BUS) { *result = sc->bus_start; return (0); } if (index == PCIB_IVAR_DOMAIN) { *result = sc->ecam; return (0); } if (bootverbose) device_printf(dev, "ERROR: Unknown index %d.\n", index); return (ENOENT); } static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } static struct rman * generic_pcie_get_rman(device_t dev, int type, u_int flags) { struct generic_pcie_core_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: return (&sc->io_rman); case SYS_RES_MEMORY: if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0) return (&sc->pmem_rman); return (&sc->mem_rman); default: break; } return (NULL); } int pci_host_generic_core_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_release_bus(sc->ecam, child, rid, res)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(dev, child, type, rid, res)); default: return (bus_generic_release_resource(dev, child, type, rid, res)); } } static struct pcie_range * generic_pcie_containing_range(device_t dev, int type, rman_res_t start, rman_res_t end) { struct generic_pcie_core_softc *sc = device_get_softc(dev); uint64_t pci_base; uint64_t size; int i, space; switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (NULL); } for (i = 0; i < MAX_RANGES_TUPLES; i++) { pci_base = sc->ranges[i].pci_base; size = sc->ranges[i].size; if (size == 0) continue; /* empty range element */ if (start < pci_base || end >= pci_base + size) continue; switch (FLAG_TYPE(sc->ranges[i].flags)) { case FLAG_TYPE_MEM: case FLAG_TYPE_PMEM: space = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: space = SYS_RES_IOPORT; break; default: continue; } if (type == space) return (&sc->ranges[i]); } return (NULL); } static int generic_pcie_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *new_start) { struct pcie_range *range; /* Translate the address from a PCI address to a physical address */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: range = generic_pcie_containing_range(dev, type, start, start); if (range == NULL) return (ENOENT); *new_start = start - range->pci_base + range->phys_base; break; default: /* No translation for non-memory types */ *new_start = start; break; } return (0); } struct resource * pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; #endif struct resource *res; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) sc = 
device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end, count, flags); break; #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); break; default: res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); break; } if (res == NULL) { device_printf(dev, "%s FAIL: type=%d, rid=%d, " "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); } return (res); } static int generic_pcie_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_activate_bus(sc->ecam, child, rid, r)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(dev, child, type, rid, r)); default: return (bus_generic_activate_resource(dev, child, type, rid, r)); } } static int generic_pcie_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_deactivate_bus(sc->ecam, child, rid, r)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(dev, child, type, rid, r)); default: return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } } static int -generic_pcie_adjust_resource(device_t dev, device_t child, int type, +generic_pcie_adjust_resource(device_t dev, device_t child, struct resource *res, rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->ecam, child, res, start, end)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_adjust_resource(dev, child, type, res, + return (bus_generic_rman_adjust_resource(dev, child, res, start, end)); default: - return (bus_generic_adjust_resource(dev, child, type, res, - start, end)); + return (bus_generic_adjust_resource(dev, child, res, start, + end)); } } static int generic_pcie_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct pcie_range *range; rman_res_t length, start; int error; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (EINVAL); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (bus_generic_map_resource(dev, child, type, r, argsp, map)); } /* Resources must be active to be mapped. 
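 *
 * Rather than creating a fresh bus_space mapping, the code below
 * redirects the request into the bridge's pre-allocated range
 * resource. For example (values assumed for illustration), a child
 * BAR at PCI address 0x10000 inside a range whose pci_base is 0
 * becomes a sub-map of range->res at args.offset = 0x10000, with
 * args.length set to the validated length.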
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); range = generic_pcie_containing_range(dev, type, rman_get_start(r), rman_get_end(r)); if (range == NULL || range->res == NULL) return (ENOENT); args.offset = start - range->pci_base; args.length = length; return (bus_generic_map_resource(dev, child, type, range->res, &args, map)); } static int generic_pcie_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { struct pcie_range *range; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (EINVAL); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: range = generic_pcie_containing_range(dev, type, rman_get_start(r), rman_get_end(r)); if (range == NULL || range->res == NULL) return (ENOENT); r = range->res; break; default: break; } return (bus_generic_unmap_resource(dev, child, type, r, map)); } static bus_dma_tag_t generic_pcie_get_dma_tag(device_t dev, device_t child) { struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); return (sc->dmat); } static device_method_t generic_pcie_methods[] = { DEVMETHOD(device_attach, pci_host_generic_core_attach), DEVMETHOD(device_detach, pci_host_generic_core_detach), DEVMETHOD(bus_get_rman, generic_pcie_get_rman), DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), DEVMETHOD(bus_alloc_resource, pci_host_generic_core_alloc_resource), DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource), DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource), DEVMETHOD(bus_map_resource, generic_pcie_map_resource), DEVMETHOD(bus_unmap_resource, generic_pcie_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), DEVMETHOD(pcib_read_config, generic_pcie_read_config), DEVMETHOD(pcib_write_config, generic_pcie_write_config), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, generic_pcie_core_driver, generic_pcie_methods, sizeof(struct generic_pcie_core_softc)); diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c index da09a917b9bc..b4c02bfeca37 100644 --- a/sys/dev/pci/pci_pci.c +++ b/sys/dev/pci/pci_pci.c @@ -1,3162 +1,3162 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * PCI:PCI bridge support. */ #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" static int pcib_probe(device_t dev); static int pcib_suspend(device_t dev); static int pcib_resume(device_t dev); static bus_child_present_t pcib_child_present; static bus_alloc_resource_t pcib_alloc_resource; #ifdef NEW_PCIB static bus_adjust_resource_t pcib_adjust_resource; static bus_release_resource_t pcib_release_resource; static bus_activate_resource_t pcib_activate_resource; static bus_deactivate_resource_t pcib_deactivate_resource; static bus_map_resource_t pcib_map_resource; static bus_unmap_resource_t pcib_unmap_resource; #endif static int pcib_reset_child(device_t dev, device_t child, int flags); static int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate); static int pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id); static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width); static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width); static int pcib_ari_maxslots(device_t dev); static int pcib_ari_maxfuncs(device_t dev); static int pcib_try_enable_ari(device_t pcib, device_t dev); static int pcib_ari_enabled(device_t pcib); static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func); #ifdef PCI_HP static void pcib_pcie_ab_timeout(void *arg, int pending); static void pcib_pcie_cc_timeout(void *arg, int pending); static void pcib_pcie_dll_timeout(void *arg, int pending); #endif static int pcib_request_feature_default(device_t pcib, device_t dev, enum pci_feature feature); static device_method_t pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcib_probe), DEVMETHOD(device_attach, pcib_attach), DEVMETHOD(device_detach, pcib_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, pcib_suspend), DEVMETHOD(device_resume, pcib_resume), /* Bus interface */ DEVMETHOD(bus_child_present, pcib_child_present), DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, pcib_alloc_resource), #ifdef NEW_PCIB DEVMETHOD(bus_adjust_resource, pcib_adjust_resource), DEVMETHOD(bus_release_resource, pcib_release_resource), DEVMETHOD(bus_activate_resource, pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, pcib_deactivate_resource), DEVMETHOD(bus_map_resource, pcib_map_resource), DEVMETHOD(bus_unmap_resource, pcib_unmap_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), 
DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), #endif DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_reset_child, pcib_reset_child), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_ari_maxslots), DEVMETHOD(pcib_maxfuncs, pcib_ari_maxfuncs), DEVMETHOD(pcib_read_config, pcib_read_config), DEVMETHOD(pcib_write_config, pcib_write_config), DEVMETHOD(pcib_route_interrupt, pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, pcib_power_for_sleep), DEVMETHOD(pcib_get_id, pcib_ari_get_id), DEVMETHOD(pcib_try_enable_ari, pcib_try_enable_ari), DEVMETHOD(pcib_ari_enabled, pcib_ari_enabled), DEVMETHOD(pcib_decode_rid, pcib_ari_decode_rid), DEVMETHOD(pcib_request_feature, pcib_request_feature_default), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc)); EARLY_DRIVER_MODULE(pcib, pci, pcib_driver, NULL, NULL, BUS_PASS_BUS); #if defined(NEW_PCIB) || defined(PCI_HP) SYSCTL_DECL(_hw_pci); #endif #ifdef NEW_PCIB static int pci_clear_pcib; SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0, "Clear firmware-assigned resources for PCI-PCI bridge I/O windows."); /* * Get the corresponding window if this resource from a child device was * sub-allocated from one of our window resource managers. */ static struct pcib_window * pcib_get_resource_window(struct pcib_softc *sc, struct resource *r) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: if (rman_is_region_manager(r, &sc->io.rman)) return (&sc->io); break; case SYS_RES_MEMORY: /* Prefetchable resources may live in either memory rman. */ if (rman_get_flags(r) & RF_PREFETCHABLE && rman_is_region_manager(r, &sc->pmem.rman)) return (&sc->pmem); if (rman_is_region_manager(r, &sc->mem.rman)) return (&sc->mem); break; } return (NULL); } /* * Is a resource from a child device sub-allocated from one of our * resource managers? */ static int pcib_is_resource_managed(struct pcib_softc *sc, struct resource *r) { #ifdef PCI_RES_BUS if (rman_get_type(r) == PCI_RES_BUS) return (rman_is_region_manager(r, &sc->bus.rman)); #endif return (pcib_get_resource_window(sc, r) != NULL); } static int pcib_is_window_open(struct pcib_window *pw) { return (pw->valid && pw->base < pw->limit); } /* * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and * handle for the resource, we could pass RF_ACTIVE up to the PCI bus * when allocating the resource windows and rely on the PCI bus driver * to do this for us. 
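 *
 * This is also why the window resources below are allocated with
 * RF_ACTIVE | RF_UNMAPPED: RF_UNMAPPED suppresses the bus space
 * mapping that activation would otherwise create, which matters for
 * windows that can span very large address ranges.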
*/ static void pcib_activate_window(struct pcib_softc *sc, int type) { PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type); } static void pcib_write_windows(struct pcib_softc *sc, int mask) { device_t dev; uint32_t val; dev = sc->dev; if (sc->io.valid && mask & WIN_IO) { val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { pci_write_config(dev, PCIR_IOBASEH_1, sc->io.base >> 16, 2); pci_write_config(dev, PCIR_IOLIMITH_1, sc->io.limit >> 16, 2); } pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1); pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1); } if (mask & WIN_MEM) { pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2); } if (sc->pmem.valid && mask & WIN_PMEM) { val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { pci_write_config(dev, PCIR_PMBASEH_1, sc->pmem.base >> 32, 4); pci_write_config(dev, PCIR_PMLIMITH_1, sc->pmem.limit >> 32, 4); } pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2); } } /* * This is used to reject I/O port allocations that conflict with an * ISA alias range. */ static int pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end, rman_res_t count) { rman_res_t next_alias; if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE)) return (0); /* Only check fixed ranges for overlap. */ if (start + count - 1 != end) return (0); /* ISA aliases are only in the lower 64KB of I/O space. */ if (start >= 65536) return (0); /* Check for overlap with 0x000 - 0x0ff as a special case. */ if (start < 0x100) goto alias; /* * If the start address is an alias, the range is an alias. * Otherwise, compute the start of the next alias range and * check if it is before the end of the candidate range. */ if ((start & 0x300) != 0) goto alias; next_alias = (start & ~0x3fful) | 0x100; if (next_alias <= end) goto alias; return (0); alias: if (bootverbose) device_printf(sc->dev, "I/O range %#jx-%#jx overlaps with an ISA alias\n", start, end); return (1); } static void pcib_add_window_resources(struct pcib_window *w, struct resource **res, int count) { struct resource **newarray; int error, i; newarray = malloc(sizeof(struct resource *) * (w->count + count), M_DEVBUF, M_WAITOK); if (w->res != NULL) bcopy(w->res, newarray, sizeof(struct resource *) * w->count); bcopy(res, newarray + w->count, sizeof(struct resource *) * count); free(w->res, M_DEVBUF); w->res = newarray; w->count += count; for (i = 0; i < count; i++) { error = rman_manage_region(&w->rman, rman_get_start(res[i]), rman_get_end(res[i])); if (error) panic("Failed to add resource to rman"); } } typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg); static void pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb, void *arg) { rman_res_t next_end; /* * If start is within an ISA alias range, move up to the start * of the next non-alias range. As a special case, addresses * in the range 0x000 - 0x0ff should also be skipped since * those are used for various system I/O devices in ISA * systems. */ if (start <= 65535) { if (start < 0x100 || (start & 0x300) != 0) { start &= ~0x3ff; start += 0x400; } } /* ISA aliases are only in the lower 64KB of I/O space. 
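 * Each 0x400-byte block below 64KB thus contributes a single usable
 * 0x100-byte range at its base (with 0x000-0x0ff reserved as noted
 * above).  For example, walking 0x1000-0x17ff invokes the callback
 * twice, for (0x1000, 0x10ff) and then (0x1400, 0x14ff).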
*/ while (start <= MIN(end, 65535)) { next_end = MIN(start | 0xff, end); cb(start, next_end, arg); start += 0x400; } if (start <= end) cb(start, end, arg); } static void count_ranges(rman_res_t start, rman_res_t end, void *arg) { int *countp; countp = arg; (*countp)++; } struct alloc_state { struct resource **res; struct pcib_softc *sc; int count, error; }; static void alloc_ranges(rman_res_t start, rman_res_t end, void *arg) { struct alloc_state *as; struct pcib_window *w; int rid; as = arg; if (as->error != 0) return; w = &as->sc->io; rid = w->reg; if (bootverbose) device_printf(as->sc->dev, "allocating non-ISA range %#jx-%#jx\n", start, end); as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT, &rid, start, end, end - start + 1, RF_ACTIVE | RF_UNMAPPED); if (as->res[as->count] == NULL) as->error = ENXIO; else as->count++; } static int pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end) { struct alloc_state as; int i, new_count; /* First, see how many ranges we need. */ new_count = 0; pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count); /* Second, allocate the ranges. */ as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF, M_WAITOK); as.sc = sc; as.count = 0; as.error = 0; pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as); if (as.error != 0) { for (i = 0; i < as.count; i++) bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io.reg, as.res[i]); free(as.res, M_DEVBUF); return (as.error); } KASSERT(as.count == new_count, ("%s: count mismatch", __func__)); /* Third, add the ranges to the window. */ pcib_add_window_resources(&sc->io, as.res, as.count); free(as.res, M_DEVBUF); return (0); } static void pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type, int flags, pci_addr_t max_address) { struct resource *res; char buf[64]; int error, rid; if (max_address != (rman_res_t)max_address) max_address = ~0; w->rman.rm_start = 0; w->rman.rm_end = max_address; w->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s %s window", device_get_nameunit(sc->dev), w->name); w->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&w->rman); if (error) panic("Failed to initialize %s %s rman", device_get_nameunit(sc->dev), w->name); if (!pcib_is_window_open(w)) return; if (w->base > max_address || w->limit > max_address) { device_printf(sc->dev, "initial %s window has too many bits, ignoring\n", w->name); return; } if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE) (void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit); else { rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit, w->limit - w->base + 1, flags | RF_ACTIVE | RF_UNMAPPED); if (res != NULL) pcib_add_window_resources(w, &res, 1); } if (w->res == NULL) { device_printf(sc->dev, "failed to allocate initial %s window: %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); w->base = max_address; w->limit = 0; pcib_write_windows(sc, w->mask); return; } pcib_activate_window(sc, type); } /* * Initialize I/O windows. */ static void pcib_probe_windows(struct pcib_softc *sc) { pci_addr_t max; device_t dev; uint32_t val; dev = sc->dev; if (pci_clear_pcib) { pcib_bridge_init(dev); } /* Determine if the I/O port window is implemented. */ val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if (val == 0) { /* * If 'val' is zero, then only 16-bits of I/O space * are supported. 
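 * The register may read as zero either because the window is
 * unimplemented or because it is a 16-bit window with a base of
 * zero; writing 0xff and reading the value back distinguishes a
 * writable (implemented) window from one hardwired to zero.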
*/ pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) { sc->io.valid = 1; pci_write_config(dev, PCIR_IOBASEL_1, 0, 1); } } else sc->io.valid = 1; /* Read the existing I/O port window. */ if (sc->io.valid) { sc->io.reg = PCIR_IOBASEL_1; sc->io.step = 12; sc->io.mask = WIN_IO; sc->io.name = "I/O port"; if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { sc->io.base = PCI_PPBIOBASE( pci_read_config(dev, PCIR_IOBASEH_1, 2), val); sc->io.limit = PCI_PPBIOLIMIT( pci_read_config(dev, PCIR_IOLIMITH_1, 2), pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffffffff; } else { sc->io.base = PCI_PPBIOBASE(0, val); sc->io.limit = PCI_PPBIOLIMIT(0, pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffff; } pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max); } /* Read the existing memory window. */ sc->mem.valid = 1; sc->mem.reg = PCIR_MEMBASE_1; sc->mem.step = 20; sc->mem.mask = WIN_MEM; sc->mem.name = "memory"; sc->mem.base = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->mem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff); /* Determine if the prefetchable memory window is implemented. */ val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if (val == 0) { /* * If 'val' is zero, then only 32-bits of memory space * are supported. */ pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) { sc->pmem.valid = 1; pci_write_config(dev, PCIR_PMBASEL_1, 0, 2); } } else sc->pmem.valid = 1; /* Read the existing prefetchable memory window. */ if (sc->pmem.valid) { sc->pmem.reg = PCIR_PMBASEL_1; sc->pmem.step = 20; sc->pmem.mask = WIN_PMEM; sc->pmem.name = "prefetch"; if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { sc->pmem.base = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), val); sc->pmem.limit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffffffffffff; } else { sc->pmem.base = PCI_PPBMEMBASE(0, val); sc->pmem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffff; } pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY, RF_PREFETCHABLE, max); } } static void pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type) { device_t dev; int error, i; if (!w->valid) return; dev = sc->dev; error = rman_fini(&w->rman); if (error) { device_printf(dev, "failed to release %s rman\n", w->name); return; } free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF); for (i = 0; i < w->count; i++) { error = bus_free_resource(dev, type, w->res[i]); if (error) device_printf(dev, "failed to release %s resource: %d\n", w->name, error); } free(w->res, M_DEVBUF); } static void pcib_free_windows(struct pcib_softc *sc) { pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY); pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY); pcib_release_window(sc, &sc->io, SYS_RES_IOPORT); } #ifdef PCI_RES_BUS /* * Allocate a suitable secondary bus for this bridge if needed and * initialize the resource manager for the secondary bus range. Note * that the minimum count is a desired value and this may allocate a * smaller range. 
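 *
 * For example (a sketch of the logic below, not a contract): if the
 * firmware assigned this bridge the single bus 2 and min_count is 4,
 * the existing range 2-2 is returned and bus_adjust_resource() is
 * then tried to grow it to 2-5; if that fails, the bridge simply
 * keeps the smaller range.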
 */
void
pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count)
{
	char buf[64];
	int error, rid, sec_reg;

	switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		bus->sub_reg = PCIR_SUBBUS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		bus->sub_reg = PCIR_SUBBUS_2;
		break;
	default:
		panic("not a PCI bridge");
	}
	bus->sec = pci_read_config(dev, sec_reg, 1);
	bus->sub = pci_read_config(dev, bus->sub_reg, 1);
	bus->dev = dev;
	bus->rman.rm_start = 0;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
	if (error)
		panic("Failed to initialize %s bus number rman",
		    device_get_nameunit(dev));

	/*
	 * Allocate a bus range.  This will return an existing bus range
	 * if one exists, or a new bus range if one does not.
	 */
	rid = 0;
	bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
	    min_count, RF_ACTIVE);
	if (bus->res == NULL) {
		/*
		 * Fall back to just allocating a range of a single bus
		 * number.
		 */
		bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
		    1, RF_ACTIVE);
	} else if (rman_get_size(bus->res) < min_count)
		/*
		 * Attempt to grow the existing range to satisfy the
		 * minimum desired count.
		 */
-		(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
-		    rman_get_start(bus->res),
-		    rman_get_start(bus->res) + min_count - 1);
+		(void)bus_adjust_resource(dev, bus->res,
+		    rman_get_start(bus->res),
+		    rman_get_start(bus->res) + min_count - 1);

	/*
	 * Add the initial resource to the rman.
	 */
	if (bus->res != NULL) {
		error = rman_manage_region(&bus->rman,
		    rman_get_start(bus->res), rman_get_end(bus->res));
		if (error)
			panic("Failed to add resource to rman");
		bus->sec = rman_get_start(bus->res);
		bus->sub = rman_get_end(bus->res);
	}
}

void
pcib_free_secbus(device_t dev, struct pcib_secbus *bus)
{
	int error;

	error = rman_fini(&bus->rman);
	if (error) {
		device_printf(dev, "failed to release bus number rman\n");
		return;
	}
	free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF);

	error = bus_free_resource(dev, PCI_RES_BUS, bus->res);
	if (error)
		device_printf(dev,
		    "failed to release bus numbers resource: %d\n", error);
}

static struct resource *
pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;

	res = rman_reserve_resource(&bus->rman, start, end, count, flags,
	    child);
	if (res == NULL)
		return (NULL);

	if (bootverbose)
		device_printf(bus->dev,
		    "allocated bus range (%ju-%ju) for rid %d of %s\n",
		    rman_get_start(res), rman_get_end(res), *rid,
		    pcib_child_name(child));
	rman_set_rid(res, *rid);
	rman_set_type(res, PCI_RES_BUS);
	return (res);
}

/*
 * Attempt to grow the secondary bus range.  This is much simpler than
 * for I/O windows as the range can only be grown by increasing
 * subbus.
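 *
 * As an illustration, growing a 2-2 range to new_end = 3 adjusts the
 * parent resource to 2-3, hands the new 3-3 region to the rman for
 * sub-allocation, and writes 3 to the subordinate bus register so the
 * bridge forwards transactions for the added bus.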
 */
static int
pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end)
{
	rman_res_t old_end;
	int error;

	old_end = rman_get_end(bus->res);
	KASSERT(new_end > old_end, ("attempt to shrink subbus"));
-	error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res,
-	    rman_get_start(bus->res), new_end);
+	error = bus_adjust_resource(bus->dev, bus->res,
+	    rman_get_start(bus->res), new_end);
	if (error)
		return (error);
	if (bootverbose)
		device_printf(bus->dev, "grew bus range to %ju-%ju\n",
		    rman_get_start(bus->res), rman_get_end(bus->res));
	error = rman_manage_region(&bus->rman, old_end + 1,
	    rman_get_end(bus->res));
	if (error)
		panic("Failed to add resource to rman");
	bus->sub = rman_get_end(bus->res);
	pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1);
	return (0);
}

struct resource *
pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	rman_res_t start_free, end_free, new_end;

	/*
	 * First, see if the request can be satisfied by the existing
	 * bus range.
	 */
	res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags);
	if (res != NULL)
		return (res);

	/*
	 * Figure out a range by which to grow the bus range.  First,
	 * find the first bus number after the last allocated bus in
	 * the rman and enforce that as a minimum starting point for
	 * the range.
	 */
	if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 ||
	    end_free != bus->sub)
		start_free = bus->sub + 1;
	if (start_free < start)
		start_free = start;
	new_end = start_free + count - 1;

	/*
	 * See if this new range would satisfy the request if it
	 * succeeds.
	 */
	if (new_end > end)
		return (NULL);

	/* Finally, attempt to grow the existing resource. */
	if (bootverbose) {
		device_printf(bus->dev,
		    "attempting to grow bus range for %ju buses\n", count);
		printf("\tback candidate range: %ju-%ju\n", start_free,
		    new_end);
	}
	if (pcib_grow_subbus(bus, new_end) == 0)
		return (pcib_suballoc_bus(bus, child, rid, start, end,
		    count, flags));
	return (NULL);
}
#endif

#else

/*
 * Is the prefetch window open (e.g., can we allocate memory in it?)
 */
static int
pcib_is_prefetch_open(struct pcib_softc *sc)
{
	return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit);
}

/*
 * Is the nonprefetch window open (e.g., can we allocate memory in it?)
 */
static int
pcib_is_nonprefetch_open(struct pcib_softc *sc)
{
	return (sc->membase > 0 && sc->membase < sc->memlimit);
}

/*
 * Is the I/O window open (e.g., can we allocate ports in it?)
 */
static int
pcib_is_io_open(struct pcib_softc *sc)
{
	return (sc->iobase > 0 && sc->iobase < sc->iolimit);
}

/*
 * Get current I/O decode.
 */
static void
pcib_get_io_decode(struct pcib_softc *sc)
{
	device_t dev;
	uint32_t iolow;

	dev = sc->dev;
	iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1);
	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
		sc->iobase = PCI_PPBIOBASE(
		    pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow);
	else
		sc->iobase = PCI_PPBIOBASE(0, iolow);

	iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1);
	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
		sc->iolimit = PCI_PPBIOLIMIT(
		    pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow);
	else
		sc->iolimit = PCI_PPBIOLIMIT(0, iolow);
}

/*
 * Get current memory decode.
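 *
 * The bridge's base and limit registers hold address bits 31:20
 * (bits 63:32 for a 64-bit prefetchable window), giving 1MB
 * granularity: e.g. a base register of 0xc000 decodes to a base of
 * 0xc0000000 and a limit register of 0xc0f0 to a limit of 0xc0ffffff,
 * a 16MB window.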
*/ static void pcib_get_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemlow; dev = sc->dev; sc->membase = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->memlimit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmembase = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow); else sc->pmembase = PCI_PPBMEMBASE(0, pmemlow); pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmemlimit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow); else sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow); } /* * Restore previous I/O decode. */ static void pcib_set_io_decode(struct pcib_softc *sc) { device_t dev; uint32_t iohi; dev = sc->dev; iohi = sc->iobase >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2); pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1); iohi = sc->iolimit >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2); pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1); } /* * Restore previous memory decode. */ static void pcib_set_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemhi; dev = sc->dev; pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2); pmemhi = sc->pmembase >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2); pmemhi = sc->pmemlimit >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2); } #endif #ifdef PCI_HP /* * PCI-express HotPlug support. */ static int pci_enable_pcie_hp = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN, &pci_enable_pcie_hp, 0, "Enable support for native PCI-express HotPlug."); TASKQUEUE_DEFINE_THREAD(pci_hp); static void pcib_probe_hotplug(struct pcib_softc *sc) { device_t dev; uint32_t link_cap; uint16_t link_sta, slot_sta; if (!pci_enable_pcie_hp) return; dev = sc->dev; if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0) return; if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT)) return; sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4); if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0) return; link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4); if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0) return; /* * Some devices report that they have an MRL when they actually * do not. Since they always report that the MRL is open, child * devices would be ignored. Try to detect these devices and * ignore their claim of HotPlug support. * * If there is an open MRL but the Data Link Layer is active, * the MRL is not real. */ if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) { link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 && (link_sta & PCIEM_LINK_STA_DL_ACTIVE) != 0) { return; } } /* * Now that we're sure we want to do hot plug, ask the * firmware, if any, if that's OK. */ if (pcib_request_feature(dev, PCI_FEATURE_HP) != 0) { if (bootverbose) device_printf(dev, "Unable to activate hot plug feature.\n"); return; } sc->flags |= PCIB_HOTPLUG; } /* * Send a HotPlug command to the slot control register. 
If this slot * uses command completion interrupts and a previous command is still * in progress, then the command is dropped. Once the previous * command completes or times out, pcib_pcie_hotplug_update() will be * invoked to post a new command based on the slot's state at that * time. */ static void pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask) { device_t dev; uint16_t ctl, new; dev = sc->dev; if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) return; ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2); new = (ctl & ~mask) | val; if (new == ctl) return; if (bootverbose) device_printf(dev, "HotPlug command: %04x -> %04x\n", ctl, new); pcie_write_config(dev, PCIER_SLOT_CTL, new, 2); if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) && (ctl & new) & PCIEM_SLOT_CTL_CCIE) { sc->flags |= PCIB_HOTPLUG_CMD_PENDING; if (!cold) taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, hz); } } static void pcib_pcie_hotplug_command_completed(struct pcib_softc *sc) { device_t dev; dev = sc->dev; if (bootverbose) device_printf(dev, "Command Completed\n"); if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING)) return; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; wakeup(sc); } /* * Returns true if a card is fully inserted from the user's * perspective. It may not yet be ready for access, but the driver * can now start enabling access if necessary. */ static bool pcib_hotplug_inserted(struct pcib_softc *sc) { /* Pretend the card isn't present if a detach is forced. */ if (sc->flags & PCIB_DETACHING) return (false); /* Card must be present in the slot. */ if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0) return (false); /* A power fault implicitly turns off power to the slot. */ if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) return (false); /* If the MRL is disengaged, the slot is powered off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP && (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0) return (false); return (true); } /* * Returns -1 if the card is fully inserted, powered, and ready for * access. Otherwise, returns 0. */ static int pcib_hotplug_present(struct pcib_softc *sc) { /* Card must be inserted. */ if (!pcib_hotplug_inserted(sc)) return (0); /* Require the Data Link Layer to be active. */ if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)) return (0); return (-1); } static int pci_enable_pcie_ei = 0; SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_ei, CTLFLAG_RWTUN, &pci_enable_pcie_ei, 0, "Enable support for PCI-express Electromechanical Interlock."); static void pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask, bool schedule_task) { bool card_inserted, ei_engaged; /* Clear DETACHING if Presence Detect has cleared. */ if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) == PCIEM_SLOT_STA_PDC) sc->flags &= ~PCIB_DETACHING; card_inserted = pcib_hotplug_inserted(sc); /* Turn the power indicator on if a card is inserted. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) { mask |= PCIEM_SLOT_CTL_PIC; if (card_inserted) val |= PCIEM_SLOT_CTL_PI_ON; else if (sc->flags & PCIB_DETACH_PENDING) val |= PCIEM_SLOT_CTL_PI_BLINK; else val |= PCIEM_SLOT_CTL_PI_OFF; } /* Turn the power on via the Power Controller if a card is inserted. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) { mask |= PCIEM_SLOT_CTL_PCC; if (card_inserted) val |= PCIEM_SLOT_CTL_PC_ON; else val |= PCIEM_SLOT_CTL_PC_OFF; } /* * If a card is inserted, enable the Electromechanical * Interlock. 
If a card is not inserted (or we are in the * process of detaching), disable the Electromechanical * Interlock. */ if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) && pci_enable_pcie_ei) { mask |= PCIEM_SLOT_CTL_EIC; ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0; if (card_inserted != ei_engaged) val |= PCIEM_SLOT_CTL_EIC; } /* * Start a timer to see if the Data Link Layer times out. * Note that we only start the timer if Presence Detect or MRL Sensor * changed on this interrupt. Stop any scheduled timer if * the Data Link Layer is active. */ if (card_inserted && !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) && sc->pcie_slot_sta & (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) { if (cold) device_printf(sc->dev, "Data Link Layer inactive\n"); else taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, hz); } else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, NULL); pcib_pcie_hotplug_command(sc, val, mask); /* * During attach the child "pci" device is added synchronously; * otherwise, the task is scheduled to manage the child * device. */ if (schedule_task && (pcib_hotplug_present(sc) != 0) != (sc->child != NULL)) taskqueue_enqueue(taskqueue_pci_hp, &sc->pcie_hp_task); } static void pcib_pcie_intr_hotplug(void *arg) { struct pcib_softc *sc; device_t dev; uint16_t old_slot_sta; sc = arg; dev = sc->dev; PCIB_HP_LOCK(sc); old_slot_sta = sc->pcie_slot_sta; sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); /* Clear the events just reported. */ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); if (bootverbose) device_printf(dev, "HotPlug interrupt: %#x\n", sc->pcie_slot_sta); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) { if (sc->flags & PCIB_DETACH_PENDING) { device_printf(dev, "Attention Button Pressed: Detach Cancelled\n"); sc->flags &= ~PCIB_DETACH_PENDING; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, NULL); } else if (old_slot_sta & PCIEM_SLOT_STA_PDS) { /* Only initiate detach sequence if device present. */ device_printf(dev, "Attention Button Pressed: Detaching in 5 seconds\n"); sc->flags |= PCIB_DETACH_PENDING; taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, 5 * hz); } } if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) device_printf(dev, "Power Fault Detected\n"); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC) device_printf(dev, "MRL Sensor Changed to %s\n", sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" : "closed"); if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC) device_printf(dev, "Presence Detect Changed to %s\n", sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? "card present" : "empty"); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC) pcib_pcie_hotplug_command_completed(sc); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) { sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); if (bootverbose) device_printf(dev, "Data Link Layer State Changed to %s\n", sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ? 
"active" : "inactive"); } pcib_pcie_hotplug_update(sc, 0, 0, true); PCIB_HP_UNLOCK(sc); } static void pcib_pcie_hotplug_task(void *context, int pending) { struct pcib_softc *sc; device_t dev; sc = context; PCIB_HP_LOCK(sc); dev = sc->dev; if (pcib_hotplug_present(sc) != 0) { if (sc->child == NULL) { sc->child = device_add_child(dev, "pci", -1); bus_generic_attach(dev); } } else { if (sc->child != NULL) { if (device_delete_child(dev, sc->child) == 0) sc->child = NULL; } } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_ab_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; PCIB_HP_LOCK(sc); if (sc->flags & PCIB_DETACH_PENDING) { sc->flags |= PCIB_DETACHING; sc->flags &= ~PCIB_DETACH_PENDING; pcib_pcie_hotplug_update(sc, 0, 0, true); } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_cc_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; device_t dev = sc->dev; uint16_t sta; PCIB_HP_LOCK(sc); sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); if (!(sta & PCIEM_SLOT_STA_CC)) { device_printf(dev, "HotPlug Command Timed Out\n"); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; } else { device_printf(dev, "Missed HotPlug interrupt waiting for Command Completion\n"); pcib_pcie_intr_hotplug(sc); } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_dll_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; device_t dev = sc->dev; uint16_t sta; PCIB_HP_LOCK(sc); sta = pcie_read_config(dev, PCIER_LINK_STA, 2); if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) { device_printf(dev, "Timed out waiting for Data Link Layer Active\n"); sc->flags |= PCIB_DETACHING; pcib_pcie_hotplug_update(sc, 0, 0, true); } else if (sta != sc->pcie_link_sta) { device_printf(dev, "Missed HotPlug interrupt waiting for DLL Active\n"); pcib_pcie_intr_hotplug(sc); } PCIB_HP_UNLOCK(sc); } static int pcib_alloc_pcie_irq(struct pcib_softc *sc) { device_t dev; int count, error, mem_rid, rid; rid = -1; dev = sc->dev; /* * For simplicity, only use MSI-X if there is a single message. * To support a device with multiple messages we would have to * use remap intr if the MSI number is not 0. 
*/ count = pci_msix_count(dev); if (count == 1) { mem_rid = pci_msix_table_bar(dev); sc->pcie_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, RF_ACTIVE); if (sc->pcie_mem == NULL) { device_printf(dev, "Failed to allocate BAR for MSI-X table\n"); } else { error = pci_alloc_msix(dev, &count); if (error == 0) rid = 1; } } if (rid < 0 && pci_msi_count(dev) > 0) { count = 1; error = pci_alloc_msi(dev, &count); if (error == 0) rid = 1; } if (rid < 0) rid = 0; sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->pcie_irq == NULL) { device_printf(dev, "Failed to allocate interrupt for PCI-e events\n"); if (rid > 0) pci_release_msi(dev); return (ENXIO); } error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC|INTR_MPSAFE, NULL, pcib_pcie_intr_hotplug, sc, &sc->pcie_ihand); if (error) { device_printf(dev, "Failed to setup PCI-e interrupt handler\n"); bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq); if (rid > 0) pci_release_msi(dev); return (error); } return (0); } static int pcib_release_pcie_irq(struct pcib_softc *sc) { device_t dev; int error; dev = sc->dev; error = bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand); if (error) return (error); error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq); if (error) return (error); error = pci_release_msi(dev); if (error) return (error); if (sc->pcie_mem != NULL) error = bus_free_resource(dev, SYS_RES_MEMORY, sc->pcie_mem); return (error); } static void pcib_setup_hotplug(struct pcib_softc *sc) { device_t dev; uint16_t mask, val; dev = sc->dev; TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_ab_task, 0, pcib_pcie_ab_timeout, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_cc_task, 0, pcib_pcie_cc_timeout, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_dll_task, 0, pcib_pcie_dll_timeout, sc); sc->pcie_hp_lock = bus_topo_mtx(); /* Allocate IRQ. */ if (pcib_alloc_pcie_irq(sc) != 0) return; sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); /* Clear any events previously pending. */ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); /* Enable HotPlug events. */ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB) val |= PCIEM_SLOT_CTL_ABPE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) val |= PCIEM_SLOT_CTL_PFDE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) val |= PCIEM_SLOT_CTL_MRLSCE; if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS)) val |= PCIEM_SLOT_CTL_CCIE; /* Turn the attention indicator off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { mask |= PCIEM_SLOT_CTL_AIC; val |= PCIEM_SLOT_CTL_AI_OFF; } pcib_pcie_hotplug_update(sc, val, mask, false); } static int pcib_detach_hotplug(struct pcib_softc *sc) { uint16_t mask, val; int error; /* Disable the card in the slot and force it to detach. */ if (sc->flags & PCIB_DETACH_PENDING) { sc->flags &= ~PCIB_DETACH_PENDING; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, NULL); } sc->flags |= PCIB_DETACHING; if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) { taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL); tsleep(sc, 0, "hpcmd", hz); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; } /* Disable HotPlug events. 
*/ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; val = 0; /* Turn the attention indicator off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { mask |= PCIEM_SLOT_CTL_AIC; val |= PCIEM_SLOT_CTL_AI_OFF; } pcib_pcie_hotplug_update(sc, val, mask, false); error = pcib_release_pcie_irq(sc); if (error) return (error); taskqueue_drain(taskqueue_pci_hp, &sc->pcie_hp_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_ab_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_cc_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_dll_task); return (0); } #endif /* * Get current bridge configuration. */ static void pcib_cfg_save(struct pcib_softc *sc) { #ifndef NEW_PCIB device_t dev; uint16_t command; dev = sc->dev; command = pci_read_config(dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_get_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_get_mem_decode(sc); #endif } /* * Restore previous bridge configuration. */ static void pcib_cfg_restore(struct pcib_softc *sc) { #ifndef NEW_PCIB uint16_t command; #endif #ifdef NEW_PCIB pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM); #else command = pci_read_config(sc->dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_set_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_set_mem_decode(sc); #endif } /* * Generic device interface */ static int pcib_probe(device_t dev) { if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) { device_set_desc(dev, "PCI-PCI bridge"); return(-10000); } return(ENXIO); } void pcib_attach_common(device_t dev) { struct pcib_softc *sc; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int comma; sc = device_get_softc(dev); sc->dev = dev; /* * Get current bridge configuration. */ sc->domain = pci_get_domain(dev); #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); #endif sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); pcib_cfg_save(sc); /* * The primary bus register should always be the bus of the * parent. */ sc->pribus = pci_get_bus(dev); pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1); /* * Setup sysctl reporting nodes */ sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); /* * Quirk handling. */ switch (pci_get_devid(dev)) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) case 0x12258086: /* Intel 82454KX/GX (Orion) */ { uint8_t supbus; supbus = pci_read_config(dev, 0x41, 1); if (supbus != 0xff) { sc->bus.sec = supbus + 1; sc->bus.sub = supbus + 1; } break; } #endif /* * The i82380FB mobile docking controller is a PCI-PCI bridge, * and it is a subtractive bridge. However, the ProgIf is wrong * so the normal setting of PCIB_SUBTRACTIVE bit doesn't * happen. There are also Toshiba and Cavium ThunderX bridges * that behave this way. 
*/ case 0xa002177d: /* Cavium ThunderX */ case 0x124b8086: /* Intel 82380FB Mobile */ case 0x060513d7: /* Toshiba ???? */ sc->flags |= PCIB_SUBTRACTIVE; break; #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* Compaq R3000 BIOS sets wrong subordinate bus number. */ case 0x00dd10de: { char *cp; if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sc->bus.sub < 0xa) { pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); } break; } #endif } if (pci_msi_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSI; if (pci_msix_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSIX; /* * Intel 815, 845 and other chipsets say they are PCI-PCI bridges, * but have a ProgIF of 0x80. The 82801 family (AA, AB, BAM/CAM, * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese. * This means they act as if they were subtractively decoding * bridges and pass all transactions. Mark them and real ProgIf 1 * parts as subtractive. */ if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 || pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE) sc->flags |= PCIB_SUBTRACTIVE; #ifdef PCI_HP pcib_probe_hotplug(sc); #endif #ifdef NEW_PCIB #ifdef PCI_RES_BUS pcib_setup_secbus(dev, &sc->bus, 1); #endif pcib_probe_windows(sc); #endif #ifdef PCI_HP if (sc->flags & PCIB_HOTPLUG) pcib_setup_hotplug(sc); #endif if (bootverbose) { device_printf(dev, " domain %d\n", sc->domain); device_printf(dev, " secondary bus %d\n", sc->bus.sec); device_printf(dev, " subordinate bus %d\n", sc->bus.sub); #ifdef NEW_PCIB if (pcib_is_window_open(&sc->io)) device_printf(dev, " I/O decode 0x%jx-0x%jx\n", (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit); if (pcib_is_window_open(&sc->mem)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit); if (pcib_is_window_open(&sc->pmem)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit); #else if (pcib_is_io_open(sc)) device_printf(dev, " I/O decode 0x%x-0x%x\n", sc->iobase, sc->iolimit); if (pcib_is_nonprefetch_open(sc)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->membase, (uintmax_t)sc->memlimit); if (pcib_is_prefetch_open(sc)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); #endif if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) || sc->flags & PCIB_SUBTRACTIVE) { device_printf(dev, " special decode "); comma = 0; if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) { printf("ISA"); comma = 1; } if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) { printf("%sVGA", comma ? ", " : ""); comma = 1; } if (sc->flags & PCIB_SUBTRACTIVE) printf("%ssubtractive", comma ? ", " : ""); printf("\n"); } } /* * Always enable busmastering on bridges so that transactions * initiated on the secondary bus are passed through to the * primary bus. 
*/ pci_enable_busmaster(dev); } #ifdef PCI_HP static int pcib_present(struct pcib_softc *sc) { if (sc->flags & PCIB_HOTPLUG) return (pcib_hotplug_present(sc) != 0); return (1); } #endif int pcib_attach_child(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->bus.sec == 0) { /* no secondary bus; we should have fixed this */ return(0); } #ifdef PCI_HP if (!pcib_present(sc)) { /* An empty HotPlug slot, so don't add a PCI bus yet. */ return (0); } #endif sc->child = device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } int pcib_attach(device_t dev) { pcib_attach_common(dev); return (pcib_attach_child(dev)); } int pcib_detach(device_t dev) { #if defined(PCI_HP) || defined(NEW_PCIB) struct pcib_softc *sc; #endif int error; #if defined(PCI_HP) || defined(NEW_PCIB) sc = device_get_softc(dev); #endif error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_HP if (sc->flags & PCIB_HOTPLUG) { error = pcib_detach_hotplug(sc); if (error) return (error); } #endif error = device_delete_children(dev); if (error) return (error); #ifdef NEW_PCIB pcib_free_windows(sc); #ifdef PCI_RES_BUS pcib_free_secbus(dev, &sc->bus); #endif #endif return (0); } int pcib_suspend(device_t dev) { pcib_cfg_save(device_get_softc(dev)); return (bus_generic_suspend(dev)); } int pcib_resume(device_t dev) { pcib_cfg_restore(device_get_softc(dev)); /* * Restore the Command register only after restoring the windows. * The bridge should not be claiming random windows. */ pci_write_config(dev, PCIR_COMMAND, pci_get_cmdreg(dev), 2); return (bus_generic_resume(dev)); } void pcib_bridge_init(device_t dev) { pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2); pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1); pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2); pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2); pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4); pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2); pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4); } int pcib_child_present(device_t dev, device_t child) { #ifdef PCI_HP struct pcib_softc *sc = device_get_softc(dev); int retval; retval = bus_child_present(dev); if (retval != 0 && sc->flags & PCIB_HOTPLUG) retval = pcib_hotplug_present(sc); return (retval); #else return (bus_child_present(dev)); #endif } int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->domain; return(0); case PCIB_IVAR_BUS: *result = sc->bus.sec; return(0); } return(ENOENT); } int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return(EINVAL); case PCIB_IVAR_BUS: return(EINVAL); } return(ENOENT); } #ifdef NEW_PCIB /* * Attempt to allocate a resource from the existing resources assigned * to a window. 
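 *
 * That is, a child's request is carved out of the bridge's own
 * window resource: e.g. a 4KB memory BAR is satisfied by
 * rman_reserve_resource() against the window's rman, without going
 * back to the parent bus unless the window must first grow.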
*/ static struct resource * pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (!pcib_is_window_open(w)) return (NULL); res = rman_reserve_resource(&w->rman, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(sc->dev, "allocated %s range (%#jx-%#jx) for rid %x of %s\n", w->name, rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); rman_set_type(res, type); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } } return (res); } /* Allocate a fresh resource range for an unconfigured window. */ static int pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; rman_res_t base, limit, wmask; int rid; /* * If this is an I/O window on a bridge with ISA enable set * and the start address is below 64k, then try to allocate an * initial window of 0x1000 bytes long starting at address * 0xf000 and walking down. Note that if the original request * was larger than the non-aliased range size of 0x100 our * caller would have raised the start address up to 64k * already. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && start < 65536) { for (base = 0xf000; (long)base >= 0; base -= 0x1000) { limit = base + 0xfff; /* * Skip ranges that wouldn't work for the * original request. Note that the actual * window that overlaps are the non-alias * ranges within [base, limit], so this isn't * quite a simple comparison. */ if (start + count > limit - 0x400) continue; if (base == 0) { /* * The first open region for the window at * 0 is 0x400-0x4ff. */ if (end - count + 1 < 0x400) continue; } else { if (end - count + 1 < base) continue; } if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) { w->base = base; w->limit = limit; return (0); } } return (ENOSPC); } wmask = ((rman_res_t)1 << w->step) - 1; if (RF_ALIGNMENT(flags) < w->step) { flags &= ~RF_ALIGNMENT_MASK; flags |= RF_ALIGNMENT_LOG2(w->step); } start &= ~wmask; end |= wmask; count = roundup2(count, (rman_res_t)1 << w->step); rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, start, end, count, flags | RF_ACTIVE | RF_UNMAPPED); if (res == NULL) return (ENOSPC); pcib_add_window_resources(w, &res, 1); pcib_activate_window(sc, type); w->base = rman_get_start(res); w->limit = rman_get_end(res); return (0); } /* Try to expand an existing window to the requested base and limit. */ static int pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t base, rman_res_t limit) { struct resource *res; int error, i, force_64k_base; KASSERT(base <= w->base && limit >= w->limit, ("attempting to shrink window")); /* * XXX: pcib_grow_window() doesn't try to do this anyway and * the error handling for all the edge cases would be tedious. */ KASSERT(limit == w->limit || base == w->base, ("attempting to grow both ends of a window")); /* * Yet more special handling for requests to expand an I/O * window behind an ISA-enabled bridge. Since I/O windows * have to grow in 0x1000 increments and the end of the 0xffff * range is an alias, growing a window below 64k will always * result in allocating new resources and never adjusting an * existing resource. 
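 *
 * For instance, extending a window whose base is 0xe000 down to
 * 0xd000 allocates the four fresh non-alias ranges 0xd000-0xd0ff,
 * 0xd400-0xd4ff, 0xd800-0xd8ff, and 0xdc00-0xdcff rather than
 * adjusting a resource the window already owns.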
 */
	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
	    (limit <= 65535 || (base <= 65535 && base != w->base))) {
		KASSERT(limit == w->limit || limit <= 65535,
		    ("attempting to grow both ends across 64k ISA alias"));

		if (base != w->base)
			error = pcib_alloc_nonisa_ranges(sc, base,
			    w->base - 1);
		else
			error = pcib_alloc_nonisa_ranges(sc, w->limit + 1,
			    limit);
		if (error == 0) {
			w->base = base;
			w->limit = limit;
		}
		return (error);
	}

	/*
	 * Find the existing resource to adjust.  Usually there is only one,
	 * but for an ISA-enabled bridge we might be growing the I/O window
	 * above 64k and need to find the existing resource that maps all
	 * of the area above 64k.
	 */
	for (i = 0; i < w->count; i++) {
		if (rman_get_end(w->res[i]) == w->limit)
			break;
	}
	KASSERT(i != w->count, ("did not find existing resource"));
	res = w->res[i];

	/*
	 * Usually the resource we found should match the window's
	 * existing range.  The one exception is the ISA-enabled case
	 * mentioned above in which case the resource should start at
	 * 64k.
	 */
	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
	    w->base <= 65535) {
		KASSERT(rman_get_start(res) == 65536,
		    ("existing resource mismatch"));
		force_64k_base = 1;
	} else {
		KASSERT(w->base == rman_get_start(res),
		    ("existing resource mismatch"));
		force_64k_base = 0;
	}

-	error = bus_adjust_resource(sc->dev, type, res,
-	    force_64k_base ? rman_get_start(res) : base, limit);
+	error = bus_adjust_resource(sc->dev, res,
+	    force_64k_base ? rman_get_start(res) : base, limit);
	if (error)
		return (error);

	/* Add the newly allocated region to the resource manager. */
	if (w->base != base) {
		error = rman_manage_region(&w->rman, base, w->base - 1);
		w->base = base;
	} else {
		error = rman_manage_region(&w->rman, w->limit + 1, limit);
		w->limit = limit;
	}
	if (error) {
		if (bootverbose)
			device_printf(sc->dev,
			    "failed to expand %s resource manager\n", w->name);
-		(void)bus_adjust_resource(sc->dev, type, res, force_64k_base ?
-		    rman_get_start(res) : w->base, w->limit);
+		(void)bus_adjust_resource(sc->dev, res, force_64k_base ?
+		    rman_get_start(res) : w->base, w->limit);
	}
	return (error);
}

/*
 * Attempt to grow a window to make room for a given resource request.
 */
static int
pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	rman_res_t align, start_free, end_free, front, back, wmask;
	int error;

	/*
	 * Clamp the desired resource range to the maximum address
	 * this window supports.  Reject impossible requests.
	 *
	 * For I/O port requests behind a bridge with the ISA enable
	 * bit set, force large allocations to start above 64k.
	 */
	if (!w->valid)
		return (EINVAL);
	if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 &&
	    start < 65536)
		start = 65536;
	if (end > w->rman.rm_end)
		end = w->rman.rm_end;
	if (start + count - 1 > end || start + count < start)
		return (EINVAL);
	wmask = ((rman_res_t)1 << w->step) - 1;

	/*
	 * If there is no resource at all, just try to allocate enough
	 * aligned space for this resource.
	 */
	if (w->res == NULL) {
		error = pcib_alloc_new_window(sc, w, type, start, end,
		    count, flags);
		if (error) {
			if (bootverbose)
				device_printf(sc->dev,
		    "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n",
				    w->name, start, end, count);
			return (error);
		}
		if (bootverbose)
			device_printf(sc->dev,
			    "allocated initial %s window of %#jx-%#jx\n",
			    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
		goto updatewin;
	}

	/*
	 * See if growing the window would help.  Compute the minimum
	 * amount of address space needed on both the front and back
	 * ends of the existing window to satisfy the allocation.
	 *
	 * For each end, build a candidate region adjusting for the
	 * required alignment, etc.
If there is a free region at the * edge of the window, grow from the inner edge of the free * region. Otherwise grow from the window boundary. * * Growing an I/O window below 64k for a bridge with the ISA * enable bit doesn't require any special magic as the step * size of an I/O window (1k) always includes multiple * non-alias ranges when it is grown in either direction. * * XXX: Special case: if w->res is completely empty and the * request size is larger than w->res, we should find the * optimal aligned buffer containing w->res and allocate that. */ if (bootverbose) device_printf(sc->dev, "attempting to grow %s window for (%#jx-%#jx,%#jx)\n", w->name, start, end, count); align = (rman_res_t)1 << RF_ALIGNMENT(flags); if (start < w->base) { if (rman_first_free_region(&w->rman, &start_free, &end_free) != 0 || start_free != w->base) end_free = w->base; if (end_free > end) end_free = end + 1; /* Move end_free down until it is properly aligned. */ end_free &= ~(align - 1); end_free--; front = end_free - (count - 1); /* * The resource would now be allocated at (front, * end_free). Ensure that fits in the (start, end) * bounds. end_free is checked above. If 'front' is * ok, ensure it is properly aligned for this window. * Also check for underflow. */ if (front >= start && front <= end_free) { if (bootverbose) printf("\tfront candidate range: %#jx-%#jx\n", front, end_free); front &= ~wmask; front = w->base - front; } else front = 0; } else front = 0; if (end > w->limit) { if (rman_last_free_region(&w->rman, &start_free, &end_free) != 0 || end_free != w->limit) start_free = w->limit + 1; if (start_free < start) start_free = start; /* Move start_free up until it is properly aligned. */ start_free = roundup2(start_free, align); back = start_free + count - 1; /* * The resource would now be allocated at (start_free, * back). Ensure that fits in the (start, end) * bounds. start_free is checked above. If 'back' is * ok, ensure it is properly aligned for this window. * Also check for overflow. */ if (back <= end && start_free <= back) { if (bootverbose) printf("\tback candidate range: %#jx-%#jx\n", start_free, back); back |= wmask; back -= w->limit; } else back = 0; } else back = 0; /* * Try to allocate the smallest needed region first. * If that fails, fall back to the other region. */ error = ENOSPC; while (front != 0 || back != 0) { if (front != 0 && (front <= back || back == 0)) { error = pcib_expand_window(sc, w, type, w->base - front, w->limit); if (error == 0) break; front = 0; } else { error = pcib_expand_window(sc, w, type, w->base, w->limit + back); if (error == 0) break; back = 0; } } if (error) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); updatewin: /* Write the new window. */ KASSERT((w->base & wmask) == 0, ("start address is not aligned")); KASSERT((w->limit & wmask) == wmask, ("end address is not aligned")); pcib_write_windows(sc, w->mask); return (0); } /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. */ static struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pcib_softc *sc; struct resource *r; sc = device_get_softc(dev); /* * VGA resources are decoded iff the VGA enable bit is set in * the bridge control register. VGA resources do not fall into * the resource windows and are passed up to the parent. 
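 *
 * (The legacy VGA ranges are I/O ports 0x3b0-0x3bb and 0x3c0-0x3df
 * plus the memory range 0xa0000-0xbffff; pci_is_vga_ioport_range()
 * and pci_is_vga_memory_range() test for them.)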
*/ if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) || (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) { if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); else return (NULL); } switch (type) { #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, count, flags)); #endif case SYS_RES_IOPORT: if (pcib_is_isa_range(sc, start, end, count)) return (NULL); r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (pcib_grow_window(sc, &sc->io, type, start, end, count, flags) == 0) r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); break; case SYS_RES_MEMORY: /* * For prefetchable resources, prefer the prefetchable * memory window, but fall back to the regular memory * window if that fails. Try both windows before * attempting to grow a window in case the firmware * has used a range in the regular memory window to * map a prefetchable BAR. */ if (flags & RF_PREFETCHABLE) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (flags & RF_PREFETCHABLE) { if (pcib_grow_window(sc, &sc->pmem, type, start, end, count, flags) == 0) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } } if (pcib_grow_window(sc, &sc->mem, type, start, end, count, flags & ~RF_PREFETCHABLE) == 0) r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); break; default: return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } /* * If attempts to suballocate from the window fail but this is a * subtractive bridge, pass the request up the tree. */ if (sc->flags & PCIB_SUBTRACTIVE && r == NULL) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); return (r); } static int -pcib_adjust_resource(device_t bus, device_t child, int type, struct resource *r, +pcib_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct pcib_softc *sc; struct pcib_window *w; rman_res_t wmask; - int error; + int error, type; sc = device_get_softc(bus); + type = rman_get_type(r); /* * If the resource wasn't sub-allocated from one of our region * managers then just pass the request up. */ if (!pcib_is_resource_managed(sc, r)) - return (bus_generic_adjust_resource(bus, child, type, r, - start, end)); + return (bus_generic_adjust_resource(bus, child, r, start, end)); #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) { /* * If our bus range isn't big enough to grow the sub-allocation * then we need to grow our bus range. Any request that would * require us to decrease the start of our own bus range is * invalid, we can only extend the end; ignore such requests * and let rman_adjust_resource fail below. */ if (start >= sc->bus.sec && end > sc->bus.sub) { error = pcib_grow_subbus(&sc->bus, end); if (error != 0) return (error); } } else #endif { /* * Resource is managed and not a secondary bus number, must * be from one of our windows. 
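 * The type is recovered from the resource itself via rman_get_type()
 * above.  As an example of the rounding below, growing a memory
 * sub-allocation (window step 20, so wmask = 0xfffff) to a new end
 * of 0xc0100000 expands the window limit to 0xc01fffff.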
*/ w = pcib_get_resource_window(sc, r); KASSERT(w != NULL, ("%s: no window for resource (%#jx-%#jx) type %d", __func__, rman_get_start(r), rman_get_end(r), type)); /* * If our window isn't big enough to grow the sub-allocation * then we need to expand the window. */ if (start < w->base || end > w->limit) { wmask = ((rman_res_t)1 << w->step) - 1; error = pcib_expand_window(sc, w, type, MIN(start & ~wmask, w->base), MAX(end | wmask, w->limit)); if (error != 0) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); pcib_write_windows(sc, w->mask); } } return (rman_adjust_resource(r, start, end)); } static int pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pcib_softc *sc; int error; sc = device_get_softc(dev); if (pcib_is_resource_managed(sc, r)) { if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } return (bus_generic_release_resource(dev, child, type, rid, r)); } static int pcib_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map map; int error; if (!pcib_is_resource_managed(sc, r)) return (bus_generic_activate_resource(dev, child, type, rid, r)); error = rman_activate_resource(r); if (error != 0) return (error); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { error = BUS_MAP_RESOURCE(dev, child, type, r, NULL, &map); if (error != 0) { rman_deactivate_resource(r); return (error); } rman_set_mapping(r, &map); } return (0); } static int pcib_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map map; int error; if (!pcib_is_resource_managed(sc, r)) return (bus_generic_deactivate_resource(dev, child, type, rid, r)); error = rman_deactivate_resource(r); if (error != 0) return (error); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { rman_get_mapping(r, &map); BUS_UNMAP_RESOURCE(dev, child, type, r, &map); } return (0); } static struct resource * pcib_find_parent_resource(struct pcib_window *w, struct resource *r) { for (int i = 0; i < w->count; i++) { if (rman_get_start(w->res[i]) <= rman_get_start(r) && rman_get_end(w->res[i]) >= rman_get_end(r)) return (w->res[i]); } return (NULL); } static int pcib_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map_request args; struct pcib_window *w; struct resource *pres; rman_res_t length, start; int error; w = pcib_get_resource_window(sc, r); if (w == NULL) return (bus_generic_map_resource(dev, child, type, r, argsp, map)); /* Resources must be active to be mapped. 
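 * The request is then redirected to the bridge's own window resource
 * containing the child's range: e.g. a child range starting at
 * 0xc0010000 inside a parent resource that starts at 0xc0000000 is
 * mapped with args.offset = 0x10000 into the parent mapping.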
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); pres = pcib_find_parent_resource(w, r); if (pres == NULL) return (ENOENT); args.offset = start - rman_get_start(pres); args.length = length; return (bus_generic_map_resource(dev, child, type, pres, &args, map)); } static int pcib_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { struct pcib_softc *sc = device_get_softc(dev); struct pcib_window *w; w = pcib_get_resource_window(sc, r); if (w != NULL) { r = pcib_find_parent_resource(w, r); if (r == NULL) return (ENOENT); } return (bus_generic_unmap_resource(dev, child, type, r, map)); } #else /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. */ static struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pcib_softc *sc = device_get_softc(dev); const char *name, *suffix; int ok; /* * Fail the allocation for this range if it's not supported. */ name = device_get_nameunit(child); if (name == NULL) { name = ""; suffix = ""; } else suffix = " "; switch (type) { case SYS_RES_IOPORT: ok = 0; if (!pcib_is_io_open(sc)) break; ok = (start >= sc->iobase && end <= sc->iolimit); /* * Make sure we allow access to VGA I/O addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_ioport_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { if (start < sc->iobase) start = sc->iobase; if (end > sc->iolimit) end = sc->iolimit; if (start < end) ok = 1; } } else { ok = 1; #if 0 /* * If we overlap with the subtractive range, then * pick the upper range to use. */ if (start < sc->iolimit && end > sc->iobase) start = sc->iolimit + 1; #endif } if (end < start) { device_printf(dev, "ioport: end (%jx) < start (%jx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok) { device_printf(dev, "%s%srequested unsupported I/O " "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n", name, suffix, start, end, sc->iobase, sc->iolimit); return (NULL); } if (bootverbose) device_printf(dev, "%s%srequested I/O range 0x%jx-0x%jx: in range\n", name, suffix, start, end); break; case SYS_RES_MEMORY: ok = 0; if (pcib_is_nonprefetch_open(sc)) ok = ok || (start >= sc->membase && end <= sc->memlimit); if (pcib_is_prefetch_open(sc)) ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit); /* * Make sure we allow access to VGA memory addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_memory_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 
1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { ok = 1; if (flags & RF_PREFETCHABLE) { if (pcib_is_prefetch_open(sc)) { if (start < sc->pmembase) start = sc->pmembase; if (end > sc->pmemlimit) end = sc->pmemlimit; } else { ok = 0; } } else { /* non-prefetchable */ if (pcib_is_nonprefetch_open(sc)) { if (start < sc->membase) start = sc->membase; if (end > sc->memlimit) end = sc->memlimit; } else { ok = 0; } } } } else if (!ok) { ok = 1; /* subtractive bridge: always ok */ #if 0 if (pcib_is_nonprefetch_open(sc)) { if (start < sc->memlimit && end > sc->membase) start = sc->memlimit + 1; } if (pcib_is_prefetch_open(sc)) { if (start < sc->pmemlimit && end > sc->pmembase) start = sc->pmemlimit + 1; } #endif } if (end < start) { device_printf(dev, "memory: end (%jx) < start (%jx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok && bootverbose) device_printf(dev, "%s%srequested unsupported memory range %#jx-%#jx " "(decoding %#jx-%#jx, %#jx-%#jx)\n", name, suffix, start, end, (uintmax_t)sc->membase, (uintmax_t)sc->memlimit, (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); if (!ok) return (NULL); if (bootverbose) device_printf(dev,"%s%srequested memory range " "0x%jx-0x%jx: good\n", name, suffix, start, end); break; default: break; } /* * Bridge is OK decoding this resource, so pass it up. */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #endif /* * If ARI is enabled on this downstream port, translate the function number * to the non-ARI slot/function. The downstream port will convert it back in * hardware. If ARI is not enabled slot and func are not modified. */ static __inline void pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func) { struct pcib_softc *sc; int ari_func; sc = device_get_softc(pcib); ari_func = *func; if (sc->flags & PCIB_ENABLE_ARI) { KASSERT(*slot == 0, ("Non-zero slot number with ARI enabled!")); *slot = PCIE_ARI_SLOT(ari_func); *func = PCIE_ARI_FUNC(ari_func); } } static void pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos) { uint32_t ctl2; ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4); ctl2 |= PCIEM_CTL2_ARI; pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4); sc->flags |= PCIB_ENABLE_ARI; } /* * PCIB interface. */ int pcib_maxslots(device_t dev) { #if !defined(__amd64__) && !defined(__i386__) uint32_t pcie_pos; uint16_t val; /* * If this is a PCIe rootport or downstream switch port, there's only * one slot permitted. */ if (pci_find_cap(dev, PCIY_EXPRESS, &pcie_pos) == 0) { val = pci_read_config(dev, pcie_pos + PCIER_FLAGS, 2); val &= PCIEM_FLAGS_TYPE; if (val == PCIEM_TYPE_ROOT_PORT || val == PCIEM_TYPE_DOWNSTREAM_PORT) return (0); } #endif return (PCI_SLOTMAX); } static int pcib_ari_maxslots(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_SLOTMAX); else return (pcib_maxslots(dev)); } static int pcib_ari_maxfuncs(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_FUNCMAX); else return (PCI_FUNCMAX); } static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func) { struct pcib_softc *sc; sc = device_get_softc(pcib); *bus = PCI_RID2BUS(rid); if (sc->flags & PCIB_ENABLE_ARI) { *slot = PCIE_ARI_RID2SLOT(rid); *func = PCIE_ARI_RID2FUNC(rid); } else { *slot = PCI_RID2SLOT(rid); *func = PCI_RID2FUNC(rid); } } /* * Since we are a child of a PCI bus, its parent must support the pcib interface. 
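 * Configuration accesses therefore hop two levels up the tree:
 * device_get_parent(dev) is the pci bus we attach to, and its parent
 * is the bridge that can actually issue the access.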
*/ static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { #ifdef PCI_HP struct pcib_softc *sc; sc = device_get_softc(dev); if (!pcib_present(sc)) { switch (width) { case 2: return (0xffff); case 1: return (0xff); default: return (0xffffffff); } } #endif pcib_xlate_ari(dev, b, &s, &f); return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, width)); } static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { #ifdef PCI_HP struct pcib_softc *sc; sc = device_get_softc(dev); if (!pcib_present(sc)) return; #endif pcib_xlate_ari(dev, b, &s, &f); PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, val, width); } /* * Route an interrupt across a PCI bridge. */ int pcib_route_interrupt(device_t pcib, device_t dev, int pin) { device_t bus; int parent_intpin; int intnum; /* * * The PCI standard defines a swizzle of the child-side device/intpin to * the parent-side intpin as follows. * * device = device on child bus * child_intpin = intpin on child bus slot (0-3) * parent_intpin = intpin on parent bus slot (0-3) * * parent_intpin = (device + child_intpin) % 4 */ parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4; /* * Our parent is a PCI bus. Its parent must export the pcib interface * which includes the ability to route interrupts. */ bus = device_get_parent(pcib); intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1); if (PCI_INTERRUPT_VALID(intnum) && bootverbose) { device_printf(pcib, "slot %d INT%c is routed to irq %d\n", pci_get_slot(dev), 'A' + pin - 1, intnum); } return(intnum); } /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */ int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSI) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } /* Pass request to release MSI/MSI-X messages up to the parent bridge. */ int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); } /* Pass request to alloc an MSI-X message up to the parent bridge. */ int pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSIX) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to release an MSI-X message up to the parent bridge. */ int pcib_release_msix(device_t pcib, device_t dev, int irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to map MSI/MSI-X message up to parent bridge. */ int pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); pci_ht_map_msi(pcib, *addr); return (0); } /* Pass request for device power state up to parent bridge. 
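 */

/*
 * Worked example of the swizzle in pcib_route_interrupt() above, with
 * hypothetical values: a device in slot 2 asserting INTA (pin == 1)
 * gives
 *
 *	parent_intpin = (2 + (1 - 1)) % 4 = 2
 *
 * and PCIB_ROUTE_INTERRUPT() is handed parent_intpin + 1 == 3, i.e.
 * the parent's INTC.
 */

/*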
*/ int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate) { device_t bus; bus = device_get_parent(pcib); return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate)); } static int pcib_ari_enabled(device_t pcib) { struct pcib_softc *sc; sc = device_get_softc(pcib); return ((sc->flags & PCIB_ENABLE_ARI) != 0); } static int pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id) { struct pcib_softc *sc; device_t bus_dev; uint8_t bus, slot, func; if (type != PCI_ID_RID) { bus_dev = device_get_parent(pcib); return (PCIB_GET_ID(device_get_parent(bus_dev), dev, type, id)); } sc = device_get_softc(pcib); if (sc->flags & PCIB_ENABLE_ARI) { bus = pci_get_bus(dev); func = pci_get_function(dev); *id = (PCI_ARI_RID(bus, func)); } else { bus = pci_get_bus(dev); slot = pci_get_slot(dev); func = pci_get_function(dev); *id = (PCI_RID(bus, slot, func)); } return (0); } /* * Check that the downstream port (pcib) and the endpoint device (dev) both * support ARI. If so, enable it and return 0, otherwise return an error. */ static int pcib_try_enable_ari(device_t pcib, device_t dev) { struct pcib_softc *sc; int error; uint32_t cap2; int ari_cap_off; uint32_t ari_ver; uint32_t pcie_pos; sc = device_get_softc(pcib); /* * ARI is controlled in a register in the PCIe capability structure. * If the downstream port does not have the PCIe capability structure * then it does not support ARI. */ error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos); if (error != 0) return (ENODEV); /* Check that the PCIe port advertises ARI support. */ cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4); if (!(cap2 & PCIEM_CAP2_ARI)) return (ENODEV); /* * Check that the endpoint device advertises ARI support via the ARI * extended capability structure. */ error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off); if (error != 0) return (ENODEV); /* * Finally, check that the endpoint device supports the same version * of ARI that we do. */ ari_ver = pci_read_config(dev, ari_cap_off, 4); if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) { if (bootverbose) device_printf(pcib, "Unsupported version of ARI (%d) detected\n", PCI_EXTCAP_VER(ari_ver)); return (ENXIO); } pcib_enable_ari(sc, pcie_pos); return (0); } int pcib_request_feature_allow(device_t pcib, device_t dev, enum pci_feature feature) { /* * No host firmware we have to negotiate with, so we allow * every valid feature requested. */ switch (feature) { case PCI_FEATURE_AER: case PCI_FEATURE_HP: break; default: return (EINVAL); } return (0); } int pcib_request_feature(device_t dev, enum pci_feature feature) { /* * Invoke PCIB_REQUEST_FEATURE of this bridge first in case * the firmware overrides the method of PCI-PCI bridges. */ return (PCIB_REQUEST_FEATURE(dev, dev, feature)); } /* * Pass the request to use this PCI feature up the tree. Either there's a * firmware like ACPI that's using this feature that will approve (or deny) the * request to take it over, or the platform has no such firmware, in which case * the request will be approved. If the request is approved, the OS is expected * to make use of the feature or render it harmless. */ static int pcib_request_feature_default(device_t pcib, device_t dev, enum pci_feature feature) { device_t bus; /* * Our parent is necessarily a pci bus. Its parent will either be * another pci bridge (which passes it up) or a host bridge that can * approve or reject the request. 
*/ bus = device_get_parent(pcib); return (PCIB_REQUEST_FEATURE(device_get_parent(bus), dev, feature)); } static int pcib_reset_child(device_t dev, device_t child, int flags) { struct pci_devinfo *pdinfo; int error; error = 0; if (dev == NULL || device_get_parent(child) != dev) goto out; error = ENXIO; if (device_get_devclass(child) != devclass_find("pci")) goto out; pdinfo = device_get_ivars(dev); if (pdinfo->cfg.pcie.pcie_location != 0 && (pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT || pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)) { error = bus_helper_reset_prepare(child, flags); if (error == 0) { error = pcie_link_reset(dev, pdinfo->cfg.pcie.pcie_location); /* XXXKIB call _post even if error != 0 ? */ bus_helper_reset_post(child, flags); } } out: return (error); } diff --git a/sys/dev/pci/pci_subr.c b/sys/dev/pci/pci_subr.c index e2583a75e303..4be3e3f166eb 100644 --- a/sys/dev/pci/pci_subr.c +++ b/sys/dev/pci/pci_subr.c @@ -1,417 +1,417 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Hudson River Trading LLC * Written by: John H. Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Support APIs for Host to PCI bridge drivers and drivers that * provide PCI domains. */ #include #include #include #include #include #include #include #include /* * Try to read the bus number of a host-PCI bridge using appropriate config * registers. */ int host_pcib_get_busno(pci_read_config_fn read_config, int bus, int slot, int func, uint8_t *busnum) { uint32_t id; id = read_config(0, bus, slot, func, PCIR_DEVVENDOR, 4); if (id == 0xffffffff) return (0); switch (id) { case 0x12258086: /* Intel 824?? */ /* XXX This is a guess */ /* *busnum = read_config(0, bus, slot, func, 0x41, 1); */ *busnum = bus; break; case 0x84c48086: /* Intel 82454KX/GX (Orion) */ *busnum = read_config(0, bus, slot, func, 0x4a, 1); break; case 0x84ca8086: /* * For the 450nx chipset, there is a whole bundle of * things pretending to be host bridges. The MIOC will * be seen first and isn't really a pci bridge (the * actual buses are attached to the PXB's). We need to * read the registers of the MIOC to figure out the * bus numbers for the PXB channels. * * Since the MIOC doesn't have a pci bus attached, we * pretend it wasn't there. 
*/ return (0); case 0x84cb8086: switch (slot) { case 0x12: /* Intel 82454NX PXB#0, Bus#A */ *busnum = read_config(0, bus, 0x10, func, 0xd0, 1); break; case 0x13: /* Intel 82454NX PXB#0, Bus#B */ *busnum = read_config(0, bus, 0x10, func, 0xd1, 1) + 1; break; case 0x14: /* Intel 82454NX PXB#1, Bus#A */ *busnum = read_config(0, bus, 0x10, func, 0xd3, 1); break; case 0x15: /* Intel 82454NX PXB#1, Bus#B */ *busnum = read_config(0, bus, 0x10, func, 0xd4, 1) + 1; break; } break; /* ServerWorks -- vendor 0x1166 */ case 0x00051166: case 0x00061166: case 0x00081166: case 0x00091166: case 0x00101166: case 0x00111166: case 0x00171166: case 0x01011166: case 0x010f1014: case 0x01101166: case 0x02011166: case 0x02251166: case 0x03021014: *busnum = read_config(0, bus, slot, func, 0x44, 1); break; /* Compaq/HP -- vendor 0x0e11 */ case 0x60100e11: *busnum = read_config(0, bus, slot, func, 0xc8, 1); break; default: /* Don't know how to read bus number. */ return 0; } return 1; } #ifdef NEW_PCIB /* * Return a pointer to a pretty name for a PCI device. If the device * has a driver attached, the device's name is used, otherwise a name * is generated from the device's PCI address. */ const char * pcib_child_name(device_t child) { static char buf[64]; if (device_get_nameunit(child) != NULL) return (device_get_nameunit(child)); snprintf(buf, sizeof(buf), "pci%d:%d:%d:%d", pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (buf); } /* * Some Host-PCI bridge drivers know which resource ranges they can * decode and should only allocate subranges to child PCI devices. * This API provides a way to manage this. The bridge driver should * initialize this structure during attach and call * pcib_host_res_decodes() on each resource range it decodes. It can * then use pcib_host_res_alloc() and pcib_host_res_adjust() as helper * routines for BUS_ALLOC_RESOURCE() and BUS_ADJUST_RESOURCE(). This * API assumes that resources for any decoded ranges can be safely * allocated from the parent via bus_generic_alloc_resource(). */ int pcib_host_res_init(device_t pcib, struct pcib_host_resources *hr) { hr->hr_pcib = pcib; resource_list_init(&hr->hr_rl); return (0); } int pcib_host_res_free(device_t pcib, struct pcib_host_resources *hr) { resource_list_free(&hr->hr_rl); return (0); } int pcib_host_res_decodes(struct pcib_host_resources *hr, int type, rman_res_t start, rman_res_t end, u_int flags) { struct resource_list_entry *rle; int rid; if (bootverbose) device_printf(hr->hr_pcib, "decoding %d %srange %#jx-%#jx\n", type, flags & RF_PREFETCHABLE ? "prefetchable ": "", start, end); rid = resource_list_add_next(&hr->hr_rl, type, start, end, end - start + 1); if (flags & RF_PREFETCHABLE) { KASSERT(type == SYS_RES_MEMORY, ("only memory is prefetchable")); rle = resource_list_find(&hr->hr_rl, type, rid); rle->flags = RLE_PREFETCH; } return (0); } struct resource * pcib_host_res_alloc(struct pcib_host_resources *hr, device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle; struct resource *r; rman_res_t new_start, new_end; if (flags & RF_PREFETCHABLE) KASSERT(type == SYS_RES_MEMORY, ("only memory is prefetchable")); rle = resource_list_find(&hr->hr_rl, type, 0); if (rle == NULL) { /* * No decoding ranges for this resource type, just pass * the request up to the parent. */ return (bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid, start, end, count, flags)); } restart: /* Try to allocate from each decoded range. 
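 * Each candidate is clipped to the caller's [start, end]; entries of
 * the wrong type or prefetch attribute are skipped, and the
 * (new_start + count < new_start) test guards against rman_res_t
 * wraparound for huge counts.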
*/ for (; rle != NULL; rle = STAILQ_NEXT(rle, link)) { if (rle->type != type) continue; if (((flags & RF_PREFETCHABLE) != 0) != ((rle->flags & RLE_PREFETCH) != 0)) continue; new_start = ummax(start, rle->start); new_end = ummin(end, rle->end); if (new_start > new_end || new_start + count - 1 > new_end || new_start + count < new_start) continue; r = bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid, new_start, new_end, count, flags); if (r != NULL) { if (bootverbose) device_printf(hr->hr_pcib, "allocated type %d (%#jx-%#jx) for rid %x of %s\n", type, rman_get_start(r), rman_get_end(r), *rid, pcib_child_name(dev)); return (r); } } /* * If we failed to find a prefetch range for a memory * resource, try again without prefetch. */ if (flags & RF_PREFETCHABLE) { flags &= ~RF_PREFETCHABLE; rle = resource_list_find(&hr->hr_rl, type, 0); goto restart; } return (NULL); } int -pcib_host_res_adjust(struct pcib_host_resources *hr, device_t dev, int type, +pcib_host_res_adjust(struct pcib_host_resources *hr, device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { struct resource_list_entry *rle; - rle = resource_list_find(&hr->hr_rl, type, 0); + rle = resource_list_find(&hr->hr_rl, rman_get_type(r), 0); if (rle == NULL) { /* * No decoding ranges for this resource type, just pass * the request up to the parent. */ - return (bus_generic_adjust_resource(hr->hr_pcib, dev, type, r, - start, end)); + return (bus_generic_adjust_resource(hr->hr_pcib, dev, r, start, + end)); } /* Only allow adjustments that stay within a decoded range. */ for (; rle != NULL; rle = STAILQ_NEXT(rle, link)) { if (rle->start <= start && rle->end >= end) return (bus_generic_adjust_resource(hr->hr_pcib, dev, - type, r, start, end)); + r, start, end)); } return (ERANGE); } #ifdef PCI_RES_BUS struct pci_domain { int pd_domain; struct rman pd_bus_rman; TAILQ_ENTRY(pci_domain) pd_link; }; static TAILQ_HEAD(, pci_domain) domains = TAILQ_HEAD_INITIALIZER(domains); /* * Each PCI domain maintains its own resource manager for PCI bus * numbers in that domain. Domain objects are created on first use. * Host to PCI bridge drivers and PCI-PCI bridge drivers should * allocate their bus ranges from their domain. 
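 *
 * A hypothetical sketch of the expected use: a host bridge's
 * BUS_ALLOC_RESOURCE method dispatches bus number requests here, e.g.
 *
 *	case PCI_RES_BUS:
 *		return (pci_domain_alloc_bus(sc->domain, child, rid,
 *		    start, end, count, flags));
 *
 * where sc->domain stands in for however the driver records its
 * domain number.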
*/ static struct pci_domain * pci_find_domain(int domain) { struct pci_domain *d; char buf[64]; int error; TAILQ_FOREACH(d, &domains, pd_link) { if (d->pd_domain == domain) return (d); } snprintf(buf, sizeof(buf), "PCI domain %d bus numbers", domain); d = malloc(sizeof(*d) + strlen(buf) + 1, M_DEVBUF, M_WAITOK | M_ZERO); d->pd_domain = domain; d->pd_bus_rman.rm_start = 0; d->pd_bus_rman.rm_end = PCI_BUSMAX; d->pd_bus_rman.rm_type = RMAN_ARRAY; strcpy((char *)(d + 1), buf); d->pd_bus_rman.rm_descr = (char *)(d + 1); error = rman_init(&d->pd_bus_rman); if (error == 0) error = rman_manage_region(&d->pd_bus_rman, 0, PCI_BUSMAX); if (error) panic("Failed to initialize PCI domain %d rman", domain); TAILQ_INSERT_TAIL(&domains, d, pd_link); return (d); } struct resource * pci_domain_alloc_bus(int domain, device_t dev, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_domain *d; struct resource *res; if (domain < 0 || domain > PCI_DOMAINMAX) return (NULL); d = pci_find_domain(domain); res = rman_reserve_resource(&d->pd_bus_rman, start, end, count, flags, dev); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_type(res, PCI_RES_BUS); return (res); } int pci_domain_adjust_bus(int domain, device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_adjust_resource(r, start, end)); } int pci_domain_release_bus(int domain, device_t dev, int rid, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_release_resource(r)); } int pci_domain_activate_bus(int domain, device_t dev, int rid, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_activate_resource(r)); } int pci_domain_deactivate_bus(int domain, device_t dev, int rid, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_deactivate_resource(r)); } #endif /* PCI_RES_BUS */ #endif /* NEW_PCIB */ diff --git a/sys/dev/pci/pcib_private.h b/sys/dev/pci/pcib_private.h index bc0b48d9f031..1f4f18d921e5 100644 --- a/sys/dev/pci/pcib_private.h +++ b/sys/dev/pci/pcib_private.h @@ -1,199 +1,199 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __PCIB_PRIVATE_H__ #define __PCIB_PRIVATE_H__ #include #ifdef NEW_PCIB /* * Data structure and routines that Host to PCI bridge drivers can use * to restrict allocations for child devices to ranges decoded by the * bridge. */ struct pcib_host_resources { device_t hr_pcib; struct resource_list hr_rl; }; int pcib_host_res_init(device_t pcib, struct pcib_host_resources *hr); int pcib_host_res_free(device_t pcib, struct pcib_host_resources *hr); int pcib_host_res_decodes(struct pcib_host_resources *hr, int type, rman_res_t start, rman_res_t end, u_int flags); struct resource *pcib_host_res_alloc(struct pcib_host_resources *hr, device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pcib_host_res_adjust(struct pcib_host_resources *hr, - device_t dev, int type, struct resource *r, rman_res_t start, + device_t dev, struct resource *r, rman_res_t start, rman_res_t end); #endif /* * Export portions of generic PCI:PCI bridge support so that it can be * used by subclasses. */ DECLARE_CLASS(pcib_driver); #ifdef NEW_PCIB #define WIN_IO 0x1 #define WIN_MEM 0x2 #define WIN_PMEM 0x4 struct pcib_window { pci_addr_t base; /* base address */ pci_addr_t limit; /* topmost address */ struct rman rman; struct resource **res; int count; /* size of 'res' array */ int reg; /* resource id from parent */ int valid; int mask; /* WIN_* bitmask of this window */ int step; /* log_2 of window granularity */ const char *name; }; #endif struct pcib_secbus { u_int sec; u_int sub; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) device_t dev; struct rman rman; struct resource *res; const char *name; int sub_reg; #endif }; /* * Bridge-specific data. 
*/ struct pcib_softc { device_t dev; device_t child; uint32_t flags; /* flags */ #define PCIB_SUBTRACTIVE 0x1 #define PCIB_DISABLE_MSI 0x2 #define PCIB_DISABLE_MSIX 0x4 #define PCIB_ENABLE_ARI 0x8 #define PCIB_HOTPLUG 0x10 #define PCIB_HOTPLUG_CMD_PENDING 0x20 #define PCIB_DETACH_PENDING 0x40 #define PCIB_DETACHING 0x80 u_int domain; /* domain number */ u_int pribus; /* primary bus number */ struct pcib_secbus bus; /* secondary bus numbers */ #ifdef NEW_PCIB struct pcib_window io; /* I/O port window */ struct pcib_window mem; /* memory window */ struct pcib_window pmem; /* prefetchable memory window */ #else pci_addr_t pmembase; /* base address of prefetchable memory */ pci_addr_t pmemlimit; /* topmost address of prefetchable memory */ pci_addr_t membase; /* base address of memory window */ pci_addr_t memlimit; /* topmost address of memory window */ uint32_t iobase; /* base address of port window */ uint32_t iolimit; /* topmost address of port window */ #endif uint16_t bridgectl; /* bridge control register */ uint16_t pcie_link_sta; uint16_t pcie_slot_sta; uint32_t pcie_slot_cap; struct resource *pcie_mem; struct resource *pcie_irq; void *pcie_ihand; struct task pcie_hp_task; struct timeout_task pcie_ab_task; struct timeout_task pcie_cc_task; struct timeout_task pcie_dll_task; struct mtx *pcie_hp_lock; }; #define PCIB_HP_LOCK(sc) mtx_lock((sc)->pcie_hp_lock) #define PCIB_HP_UNLOCK(sc) mtx_unlock((sc)->pcie_hp_lock) #define PCIB_HP_LOCK_ASSERT(sc) mtx_assert((sc)->pcie_hp_lock, MA_OWNED) #define PCIB_SUPPORTED_ARI_VER 1 typedef uint32_t pci_read_config_fn(int d, int b, int s, int f, int reg, int width); int host_pcib_get_busno(pci_read_config_fn read_config, int bus, int slot, int func, uint8_t *busnum); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct resource *pci_domain_alloc_bus(int domain, device_t dev, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pci_domain_adjust_bus(int domain, device_t dev, struct resource *r, rman_res_t start, rman_res_t end); int pci_domain_release_bus(int domain, device_t dev, int rid, struct resource *r); int pci_domain_activate_bus(int domain, device_t dev, int rid, struct resource *r); int pci_domain_deactivate_bus(int domain, device_t dev, int rid, struct resource *r); struct resource *pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); void pcib_free_secbus(device_t dev, struct pcib_secbus *bus); void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count); #endif int pcib_attach(device_t dev); int pcib_attach_child(device_t dev); void pcib_attach_common(device_t dev); void pcib_bridge_init(device_t dev); #ifdef NEW_PCIB const char *pcib_child_name(device_t child); #endif int pcib_detach(device_t dev); int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int pcib_maxslots(device_t dev); int pcib_maxfuncs(device_t dev); int pcib_route_interrupt(device_t pcib, device_t dev, int pin); int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs); int pcib_alloc_msix(device_t pcib, device_t dev, int *irq); int pcib_release_msix(device_t pcib, device_t dev, int irq); int pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); int pcib_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t 
*id); void pcib_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func); int pcib_request_feature(device_t dev, enum pci_feature feature); int pcib_request_feature_allow(device_t pcib, device_t dev, enum pci_feature feature); #endif diff --git a/sys/dev/vmd/vmd.c b/sys/dev/vmd/vmd.c index d885cd15ac26..c258ef7a7047 100644 --- a/sys/dev/vmd/vmd.c +++ b/sys/dev/vmd/vmd.c @@ -1,761 +1,759 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alexander Motin * Copyright 2019 Cisco Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" struct vmd_type { u_int16_t vmd_vid; u_int16_t vmd_did; char *vmd_name; int flags; #define BUS_RESTRICT 1 #define VECTOR_OFFSET 2 #define CAN_BYPASS_MSI 4 }; #define VMD_CAP 0x40 #define VMD_BUS_RESTRICT 0x1 #define VMD_CONFIG 0x44 #define VMD_BYPASS_MSI 0x2 #define VMD_BUS_START(x) ((x >> 8) & 0x3) #define VMD_LOCK 0x70 SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Intel Volume Management Device tuning parameters"); /* * By default all VMD devices remap children MSI/MSI-X interrupts into their * own. It creates additional isolation, but also complicates things due to * sharing, etc. Fortunately some VMD devices can bypass the remapping. */ static int vmd_bypass_msi = 1; SYSCTL_INT(_hw_vmd, OID_AUTO, bypass_msi, CTLFLAG_RWTUN, &vmd_bypass_msi, 0, "Bypass MSI remapping on capable hardware"); /* * All MSIs within a group share address, so VMD can't distinguish them. * It makes no sense to use more than one per device, only if required by * some specific device drivers. */ static int vmd_max_msi = 1; SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0, "Maximum number of MSI vectors per device"); /* * MSI-X can use different addresses, but we have limited number of MSI-X * we can route to, so use conservative default to try to avoid sharing. 
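 * Both limits are also boot-time tunables; for example (hypothetical
 * value), loader.conf(5) could set
 *
 *	hw.vmd.max_msix="8"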
*/ static int vmd_max_msix = 3; SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0, "Maximum number of MSI-X vectors per device"); static struct vmd_type vmd_devs[] = { { 0x8086, 0x201d, "Intel Volume Management Device", 0 }, { 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT | CAN_BYPASS_MSI }, { 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x7d0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0xa77f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0xad0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0, 0, NULL, 0 } }; static int vmd_probe(device_t dev) { struct vmd_type *t; uint16_t vid, did; vid = pci_get_vendor(dev); did = pci_get_device(dev); for (t = vmd_devs; t->vmd_name != NULL; t++) { if (vid == t->vmd_vid && did == t->vmd_did) { device_set_desc(dev, t->vmd_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void vmd_free(struct vmd_softc *sc) { struct vmd_irq *vi; struct vmd_irq_user *u; int i; if (sc->psc.bus.rman.rm_end != 0) rman_fini(&sc->psc.bus.rman); if (sc->psc.mem.rman.rm_end != 0) rman_fini(&sc->psc.mem.rman); while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) { LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); } if (sc->vmd_irq != NULL) { for (i = 0; i < sc->vmd_msix_count; i++) { vi = &sc->vmd_irq[i]; if (vi->vi_res == NULL) continue; bus_teardown_intr(sc->psc.dev, vi->vi_res, vi->vi_handle); bus_release_resource(sc->psc.dev, SYS_RES_IRQ, vi->vi_rid, vi->vi_res); } } free(sc->vmd_irq, M_DEVBUF); sc->vmd_irq = NULL; pci_release_msi(sc->psc.dev); for (i = 0; i < VMD_MAX_BAR; i++) { if (sc->vmd_regs_res[i] != NULL) bus_release_resource(sc->psc.dev, SYS_RES_MEMORY, sc->vmd_regs_rid[i], sc->vmd_regs_res[i]); } } /* Hidden PCI Roots are hidden in BAR(0). */ static uint32_t vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { struct vmd_softc *sc; bus_addr_t offset; sc = device_get_softc(dev); if (b < sc->vmd_bus_start || b > sc->vmd_bus_end) return (0xffffffff); offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg; switch (width) { case 4: return (bus_read_4(sc->vmd_regs_res[0], offset)); case 2: return (bus_read_2(sc->vmd_regs_res[0], offset)); case 1: return (bus_read_1(sc->vmd_regs_res[0], offset)); default: __assert_unreachable(); return (0xffffffff); } } static void vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { struct vmd_softc *sc; bus_addr_t offset; sc = device_get_softc(dev); if (b < sc->vmd_bus_start || b > sc->vmd_bus_end) return; offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg; switch (width) { case 4: return (bus_write_4(sc->vmd_regs_res[0], offset, val)); case 2: return (bus_write_2(sc->vmd_regs_res[0], offset, val)); case 1: return (bus_write_1(sc->vmd_regs_res[0], offset, val)); default: __assert_unreachable(); } } static void vmd_set_msi_bypass(device_t dev, bool enable) { uint16_t val; val = pci_read_config(dev, VMD_CONFIG, 2); if (enable) val |= VMD_BYPASS_MSI; else val &= ~VMD_BYPASS_MSI; pci_write_config(dev, VMD_CONFIG, val, 2); } static int vmd_intr(void *arg) { /* * We have nothing to do here, but we have to register some interrupt * handler to make PCI code setup and enable the MSI-X vector. 
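 * Returning FILTER_STRAY leaves the interrupt unclaimed, so the
 * handlers of children sharing the vector are still consulted.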
*/ return (FILTER_STRAY); } static int vmd_attach(device_t dev) { struct vmd_softc *sc; struct pcib_secbus *bus; struct pcib_window *w; struct vmd_type *t; struct vmd_irq *vi; uint16_t vid, did; uint32_t bar; int i, j, error; char buf[64]; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->psc.dev = dev; sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev); pci_enable_busmaster(dev); for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) { sc->vmd_regs_rid[i] = PCIR_BAR(j); bar = pci_read_config(dev, PCIR_BAR(0), 4); if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) j++; if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate resources\n"); goto fail; } } vid = pci_get_vendor(dev); did = pci_get_device(dev); for (t = vmd_devs; t->vmd_name != NULL; t++) { if (vid == t->vmd_vid && did == t->vmd_did) break; } sc->vmd_bus_start = 0; if ((t->flags & BUS_RESTRICT) && (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) { switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) { case 0: sc->vmd_bus_start = 0; break; case 1: sc->vmd_bus_start = 128; break; case 2: sc->vmd_bus_start = 224; break; default: device_printf(dev, "Unknown bus offset\n"); goto fail; } } sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start + (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1); bus = &sc->psc.bus; bus->sec = sc->vmd_bus_start; bus->sub = sc->vmd_bus_end; bus->dev = dev; bus->rman.rm_start = 0; bus->rman.rm_end = PCI_BUSMAX; bus->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev)); bus->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&bus->rman); if (error) { device_printf(dev, "Failed to initialize bus rman\n"); bus->rman.rm_end = 0; goto fail; } error = rman_manage_region(&bus->rman, sc->vmd_bus_start, sc->vmd_bus_end); if (error) { device_printf(dev, "Failed to add resource to bus rman\n"); goto fail; } w = &sc->psc.mem; w->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev)); w->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&w->rman); if (error) { device_printf(dev, "Failed to initialize memory rman\n"); w->rman.rm_end = 0; goto fail; } error = rman_manage_region(&w->rman, rman_get_start(sc->vmd_regs_res[1]), rman_get_end(sc->vmd_regs_res[1])); if (error) { device_printf(dev, "Failed to add resource to memory rman\n"); goto fail; } error = rman_manage_region(&w->rman, rman_get_start(sc->vmd_regs_res[2]) + 0x2000, rman_get_end(sc->vmd_regs_res[2])); if (error) { device_printf(dev, "Failed to add resource to memory rman\n"); goto fail; } LIST_INIT(&sc->vmd_users); sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 
1 : 0; sc->vmd_msix_count = pci_msix_count(dev); if (vmd_bypass_msi && (t->flags & CAN_BYPASS_MSI)) { sc->vmd_msix_count = 0; vmd_set_msi_bypass(dev, true); } else if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) { sc->vmd_irq = malloc(sizeof(struct vmd_irq) * sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < sc->vmd_msix_count; i++) { vi = &sc->vmd_irq[i]; vi->vi_rid = i + 1; vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE); if (vi->vi_res == NULL) { device_printf(dev, "Failed to allocate irq\n"); goto fail; } vi->vi_irq = rman_get_start(vi->vi_res); if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC | INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) { device_printf(dev, "Can't set up interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, vi->vi_rid, vi->vi_res); vi->vi_res = NULL; goto fail; } } vmd_set_msi_bypass(dev, false); } sc->vmd_dma_tag = bus_get_dma_tag(dev); sc->psc.child = device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); fail: vmd_free(sc); return (ENXIO); } static int vmd_detach(device_t dev) { struct vmd_softc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error) return (error); error = device_delete_children(dev); if (error) return (error); if (sc->vmd_msix_count == 0) vmd_set_msi_bypass(dev, false); vmd_free(sc); return (0); } static bus_dma_tag_t vmd_get_dma_tag(device_t dev, device_t child) { struct vmd_softc *sc = device_get_softc(dev); return (sc->vmd_dma_tag); } static struct rman * vmd_get_rman(device_t dev, int type, u_int flags) { struct vmd_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_MEMORY: return (&sc->psc.mem.rman); case PCI_RES_BUS: return (&sc->psc.bus.rman); default: /* VMD hardware does not support I/O ports. */ return (NULL); } } static struct resource * vmd_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (type == SYS_RES_IRQ) { /* VMD hardware does not support legacy interrupts. 
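 * A rid of 0 is the legacy INTx resource, so refuse it; non-zero rids
 * are MSI/MSI-X vectors that the parent can supply.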
*/ if (*rid == 0) return (NULL); return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags | RF_SHAREABLE)); } res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (bootverbose && res != NULL) { switch (type) { case SYS_RES_MEMORY: device_printf(dev, "allocated memory range (%#jx-%#jx) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); break; case PCI_RES_BUS: device_printf(dev, "allocated bus range (%ju-%ju) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); break; } } return (res); } static int -vmd_adjust_resource(device_t dev, device_t child, int type, +vmd_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { - if (type == SYS_RES_IRQ) { - return (bus_generic_adjust_resource(dev, child, type, r, - start, end)); + if (rman_get_type(r) == SYS_RES_IRQ) { + return (bus_generic_adjust_resource(dev, child, r, start, end)); } - return (bus_generic_rman_adjust_resource(dev, child, type, r, start, - end)); + return (bus_generic_rman_adjust_resource(dev, child, r, start, end)); } static int vmd_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_IRQ) { return (bus_generic_release_resource(dev, child, type, rid, r)); } return (bus_generic_rman_release_resource(dev, child, type, rid, r)); } static int vmd_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_IRQ) { return (bus_generic_activate_resource(dev, child, type, rid, r)); } return (bus_generic_rman_activate_resource(dev, child, type, rid, r)); } static int vmd_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_IRQ) { return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } return (bus_generic_rman_deactivate_resource(dev, child, type, rid, r)); } static struct resource * vmd_find_parent_resource(struct vmd_softc *sc, struct resource *r) { for (int i = 1; i < 3; i++) { if (rman_get_start(sc->vmd_regs_res[i]) <= rman_get_start(r) && rman_get_end(sc->vmd_regs_res[i]) >= rman_get_end(r)) return (sc->vmd_regs_res[i]); } return (NULL); } static int vmd_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct vmd_softc *sc = device_get_softc(dev); struct resource_map_request args; struct resource *pres; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); pres = vmd_find_parent_resource(sc, r); if (pres == NULL) return (ENOENT); args.offset = start - rman_get_start(pres); args.length = length; return (bus_generic_map_resource(dev, child, type, pres, &args, map)); } static int vmd_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { struct vmd_softc *sc = device_get_softc(dev); r = vmd_find_parent_resource(sc, r); if (r == NULL) return (ENOENT); return (bus_generic_unmap_resource(dev, child, type, r, map)); } static int vmd_route_interrupt(device_t dev, device_t child, int pin) { /* VMD hardware does not support legacy interrupts. 
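 * There is no INTx path through VMD, so report the routing as failed;
 * children are expected to use MSI or MSI-X instead.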
*/ return (PCI_INVALID_IRQ); } static int vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount, int *irqs) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; int i, ibest = 0, best = INT_MAX; if (sc->vmd_msix_count == 0) { return (PCIB_ALLOC_MSI(device_get_parent(device_get_parent(dev)), child, count, maxcount, irqs)); } if (count > vmd_max_msi) return (ENOSPC); LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) return (EBUSY); } for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (best > sc->vmd_irq[i].vi_nusers) { best = sc->vmd_irq[i].vi_nusers; ibest = i; } } u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO); u->viu_child = child; u->viu_vector = ibest; LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link); sc->vmd_irq[ibest].vi_nusers += count; for (i = 0; i < count; i++) irqs[i] = sc->vmd_irq[ibest].vi_irq; return (0); } static int vmd_release_msi(device_t dev, device_t child, int count, int *irqs) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; if (sc->vmd_msix_count == 0) { return (PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)), child, count, irqs)); } LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) { sc->vmd_irq[u->viu_vector].vi_nusers -= count; LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); return (0); } } return (EINVAL); } static int vmd_alloc_msix(device_t dev, device_t child, int *irq) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; int i, ibest = 0, best = INT_MAX; if (sc->vmd_msix_count == 0) { return (PCIB_ALLOC_MSIX(device_get_parent(device_get_parent(dev)), child, irq)); } i = 0; LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) i++; } if (i >= vmd_max_msix) return (ENOSPC); for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (best > sc->vmd_irq[i].vi_nusers) { best = sc->vmd_irq[i].vi_nusers; ibest = i; } } u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO); u->viu_child = child; u->viu_vector = ibest; LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link); sc->vmd_irq[ibest].vi_nusers++; *irq = sc->vmd_irq[ibest].vi_irq; return (0); } static int vmd_release_msix(device_t dev, device_t child, int irq) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; if (sc->vmd_msix_count == 0) { return (PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)), child, irq)); } LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child && sc->vmd_irq[u->viu_vector].vi_irq == irq) { sc->vmd_irq[u->viu_vector].vi_nusers--; LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); return (0); } } return (EINVAL); } static int vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct vmd_softc *sc = device_get_softc(dev); int i; if (sc->vmd_msix_count == 0) { return (PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)), child, irq, addr, data)); } for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (sc->vmd_irq[i].vi_irq == irq) break; } if (i >= sc->vmd_msix_count) return (EINVAL); *addr = MSI_INTEL_ADDR_BASE | (i << 12); *data = 0; return (0); } static device_method_t vmd_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vmd_probe), DEVMETHOD(device_attach, vmd_attach), DEVMETHOD(device_detach, vmd_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_get_dma_tag, vmd_get_dma_tag), DEVMETHOD(bus_get_rman, 
vmd_get_rman), DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, vmd_alloc_resource), DEVMETHOD(bus_adjust_resource, vmd_adjust_resource), DEVMETHOD(bus_release_resource, vmd_release_resource), DEVMETHOD(bus_activate_resource, vmd_activate_resource), DEVMETHOD(bus_deactivate_resource, vmd_deactivate_resource), DEVMETHOD(bus_map_resource, vmd_map_resource), DEVMETHOD(bus_unmap_resource, vmd_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, vmd_read_config), DEVMETHOD(pcib_write_config, vmd_write_config), DEVMETHOD(pcib_route_interrupt, vmd_route_interrupt), DEVMETHOD(pcib_alloc_msi, vmd_alloc_msi), DEVMETHOD(pcib_release_msi, vmd_release_msi), DEVMETHOD(pcib_alloc_msix, vmd_alloc_msix), DEVMETHOD(pcib_release_msix, vmd_release_msix), DEVMETHOD(pcib_map_msi, vmd_map_msi), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc)); DRIVER_MODULE(vmd, pci, vmd_pci_driver, NULL, NULL); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd, vmd_devs, nitems(vmd_devs) - 1); diff --git a/sys/kern/bus_if.m b/sys/kern/bus_if.m index 7078683911b8..497b98ca4601 100644 --- a/sys/kern/bus_if.m +++ b/sys/kern/bus_if.m @@ -1,985 +1,983 @@ #- # Copyright (c) 1998-2004 Doug Rabson # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # #include #include #include /** * @defgroup BUS bus - KObj methods for drivers of devices with children * @brief A set of methods required by device drivers that support * child devices. * @{ */ INTERFACE bus; # # Default implementations of some methods.
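# A METHOD's DEFAULT clause points at one of these when a bus driver
# does not supply its own implementation.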
# CODE { static struct resource * null_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { return (0); } static int null_remap_intr(device_t bus, device_t dev, u_int irq) { if (dev != NULL) return (BUS_REMAP_INTR(dev, NULL, irq)); return (ENXIO); } static device_t null_add_child(device_t bus, int order, const char *name, int unit) { panic("bus_add_child is not implemented"); } static int null_reset_post(device_t bus, device_t dev) { return (0); } static int null_reset_prepare(device_t bus, device_t dev) { return (0); } static struct rman * null_get_rman(device_t bus, int type, u_int flags) { return (NULL); } }; /** * @brief Print a description of a child device * * This is called from system code which prints out a description of a * device. It should describe the attachment that the child has with * the parent. For instance the TurboLaser bus prints which node the * device is attached to. See bus_generic_print_child() for more * information. * * @param _dev the device whose child is being printed * @param _child the child device to describe * * @returns the number of characters output. */ METHOD int print_child { device_t _dev; device_t _child; } DEFAULT bus_generic_print_child; /** * @brief Print a notification about an unprobed child device. * * Called for each child device that did not succeed in probing for a * driver. * * @param _dev the device whose child was being probed * @param _child the child device which failed to probe */ METHOD void probe_nomatch { device_t _dev; device_t _child; }; /** * @brief Read the value of a bus-specific attribute of a device * * This method, along with BUS_WRITE_IVAR() manages a bus-specific set * of instance variables of a child device. The intention is that * each different type of bus defines a set of appropriate instance * variables (such as ports and irqs for ISA bus etc.) * * This information could be given to the child device as a struct but * that makes it hard for a bus to add or remove variables without * forcing an edit and recompile for all drivers which may not be * possible for vendor supplied binary drivers. * * This method copies the value of an instance variable to the * location specified by @p *_result. * * @param _dev the device whose child was being examined * @param _child the child device whose instance variable is * being read * @param _index the instance variable to read * @param _result a location to receive the instance variable * value * * @retval 0 success * @retval ENOENT no such instance variable is supported by @p * _dev */ METHOD int read_ivar { device_t _dev; device_t _child; int _index; uintptr_t *_result; }; /** * @brief Write the value of a bus-specific attribute of a device * * This method sets the value of an instance variable to @p _value. * * @param _dev the device whose child was being updated * @param _child the child device whose instance variable is * being written * @param _index the instance variable to write * @param _value the value to write to that instance variable * * @retval 0 success * @retval ENOENT no such instance variable is supported by @p * _dev * @retval EINVAL the instance variable was recognised but * contains a read-only value */ METHOD int write_ivar { device_t _dev; device_t _child; int _indx; uintptr_t _value; }; /** * @brief Notify a bus that a child was deleted * * Called at the beginning of device_delete_child() to allow the parent * to teardown any bus-specific state for the child. 
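 * Unlike BUS_CHILD_DETACHED(), this is invoked for every child being
 * deleted, whether or not a driver was ever attached to it.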
* * @param _dev the device whose child is being deleted * @param _child the child device which is being deleted */ METHOD void child_deleted { device_t _dev; device_t _child; }; /** * @brief Notify a bus that a child was detached * * Called after the child's DEVICE_DETACH() method to allow the parent * to reclaim any resources allocated on behalf of the child. * * @param _dev the device whose child changed state * @param _child the child device which changed state */ METHOD void child_detached { device_t _dev; device_t _child; }; /** * @brief Notify a bus that a new driver was added * * Called when a new driver is added to the devclass which owns this * bus. The generic implementation of this method attempts to probe and * attach any un-matched children of the bus. * * @param _dev the device whose devclass had a new driver * added to it * @param _driver the new driver which was added */ METHOD void driver_added { device_t _dev; driver_t *_driver; } DEFAULT bus_generic_driver_added; /** * @brief Create a new child device * * For buses which use drivers supporting DEVICE_IDENTIFY() to * enumerate their devices, this method is used to create new * device instances. The new device will be added after the last * existing child with the same order. Implementations of bus_add_child * call device_add_child_ordered to add the child and often add * a suitable ivar to the device specific to that bus. * * @param _dev the bus device which will be the parent of the * new child device * @param _order a value which is used to partially sort the * children of @p _dev - devices created using * lower values of @p _order appear first in @p * _dev's list of children * @param _name devclass name for new device or @c NULL if not * specified * @param _unit unit number for new device or @c -1 if not * specified */ METHOD device_t add_child { device_t _dev; u_int _order; const char *_name; int _unit; } DEFAULT null_add_child; /** * @brief Rescan the bus * * This method is called by a parent bridge or devctl to trigger a bus * rescan. The rescan should delete devices no longer present and * enumerate devices that have newly arrived. * * @param _dev the bus device */ METHOD int rescan { device_t _dev; } DEFAULT bus_null_rescan; /** * @brief Allocate a system resource * * This method is called by child devices of a bus to allocate resources. * The types are defined in <machine/resource.h>; the meaning of the * resource-ID field varies from bus to bus (but @p *rid == 0 is always * valid if the resource type is). If a resource was allocated and the * caller did not use the RF_ACTIVE flag to specify that it should be * activated immediately, the caller is responsible for calling * BUS_ACTIVATE_RESOURCE() when it actually uses the resource.
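As a sketch of the allocate-then-activate contract just described (the foo_ names and the rid value are illustrative, and error handling is trimmed to the essentials):

	static int
	foo_attach(device_t dev)
	{
		struct resource *res;
		int rid = 0;

		/* No RF_ACTIVE here, so the resource comes back inactive. */
		res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
		if (res == NULL)
			return (ENXIO);
		if (bus_activate_resource(dev, SYS_RES_MEMORY, rid, res) != 0) {
			bus_release_resource(dev, SYS_RES_MEMORY, rid, res);
			return (ENXIO);
		}
		return (0);
	}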
* * @param _dev the parent device of @p _child * @param _child the device which is requesting an allocation * @param _type the type of resource to allocate * @param _rid a pointer to the resource identifier * @param _start hint at the start of the resource range - pass * @c 0 for any start address * @param _end hint at the end of the resource range - pass * @c ~0 for any end address * @param _count hint at the size of range required - pass @c 1 * for any size * @param _flags any extra flags to control the resource * allocation - see @c RF_XXX flags in <sys/rman.h> * for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ METHOD struct resource * alloc_resource { device_t _dev; device_t _child; int _type; int *_rid; rman_res_t _start; rman_res_t _end; rman_res_t _count; u_int _flags; } DEFAULT null_alloc_resource; /** * @brief Activate a resource * * Activate a resource previously allocated with * BUS_ALLOC_RESOURCE(). This may enable decoding of this resource in a * device for instance. It will also establish a mapping for the resource * unless RF_UNMAPPED was set when allocating the resource. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _rid the resource identifier * @param _r the resource to activate */ METHOD int activate_resource { device_t _dev; device_t _child; int _type; int _rid; struct resource *_r; }; /** * @brief Map a resource * * Allocate a mapping for a range of an active resource. The mapping * is described by a struct resource_map object. This may for instance * map a memory region into the kernel's virtual address space. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _r the resource to map * @param _args optional attributes of the mapping * @param _map the mapping */ METHOD int map_resource { device_t _dev; device_t _child; int _type; struct resource *_r; struct resource_map_request *_args; struct resource_map *_map; } DEFAULT bus_generic_map_resource; /** * @brief Unmap a resource * * Release a mapping previously allocated with * BUS_MAP_RESOURCE(). This may for instance unmap a memory region * from the kernel's virtual address space. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _r the resource * @param _map the mapping to release */ METHOD int unmap_resource { device_t _dev; device_t _child; int _type; struct resource *_r; struct resource_map *_map; } DEFAULT bus_generic_unmap_resource; /** * @brief Deactivate a resource * * Deactivate a resource previously allocated with * BUS_ALLOC_RESOURCE(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _rid the resource identifier * @param _r the resource to deactivate */ METHOD int deactivate_resource { device_t _dev; device_t _child; int _type; int _rid; struct resource *_r; }; /** * @brief Adjust a resource * * Adjust the start and/or end of a resource allocated by * BUS_ALLOC_RESOURCE. At least part of the new address range must overlap * with the existing address range. If successful, the resource's range * will be adjusted to [start, end] on return.
* * @param _dev the parent device of @p _child * @param _child the device which allocated the resource - * @param _type the type of resource * @param _res the resource to adjust * @param _start the new starting address of the resource range * @param _end the new ending address of the resource range */ METHOD int adjust_resource { device_t _dev; device_t _child; - int _type; struct resource *_res; rman_res_t _start; rman_res_t _end; }; /** * @brief translate a resource value * * Give a bus driver the opportunity to translate resource ranges. If * successful, the host's view of the resource starting at @p _start is * returned in @p _newstart, otherwise an error is returned. * * @param _dev the device associated with the resource * @param _type the type of resource * @param _start the starting address of the resource range * @param _newstart the new starting address of the resource range */ METHOD int translate_resource { device_t _dev; int _type; rman_res_t _start; rman_res_t *_newstart; } DEFAULT bus_generic_translate_resource; /** * @brief Release a resource * * Free a resource allocated by the BUS_ALLOC_RESOURCE. The @p _rid * value must be the same as the one returned by BUS_ALLOC_RESOURCE() * (which is not necessarily the same as the one the client passed). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _rid the resource identifier * @param _r the resource to release */ METHOD int release_resource { device_t _dev; device_t _child; int _type; int _rid; struct resource *_res; }; /** * @brief Install an interrupt handler * * This method is used to associate an interrupt handler function with * an irq resource. When the interrupt triggers, the function @p _intr * will be called with the value of @p _arg as its single * argument. The value returned in @p *_cookiep is used to cancel the * interrupt handler - the caller should save this value to use in a * future call to BUS_TEARDOWN_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _flags a set of bits from enum intr_type specifying * the class of interrupt * @param _intr the function to call when the interrupt * triggers * @param _arg a value to use as the single argument in calls * to @p _intr * @param _cookiep a pointer to a location to receive a cookie * value that may be used to remove the interrupt * handler */ METHOD int setup_intr { device_t _dev; device_t _child; struct resource *_irq; int _flags; driver_filter_t *_filter; driver_intr_t *_intr; void *_arg; void **_cookiep; }; /** * @brief Uninstall an interrupt handler * * This method is used to disassociate an interrupt handler function * with an irq resource. The value of @p _cookie must be the value * returned from a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cookie the cookie value returned when the interrupt * was originally registered */ METHOD int teardown_intr { device_t _dev; device_t _child; struct resource *_irq; void *_cookie; }; /** * @brief Suspend an interrupt handler * * This method is used to mark a handler as suspended in the case * that the associated device is powered down and cannot be a source * for the, typically shared, interrupt. 
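The SETUP_INTR/TEARDOWN_INTR pair above is normally reached through the bus_setup_intr() and bus_teardown_intr() wrappers. A sketch with hypothetical foo_ names; the cookie saved through cookiep is what a later teardown needs:

	static void
	foo_intr(void *arg)
	{
		/* Interrupt work for the device passed in arg goes here. */
	}

	static int
	foo_setup_irq(device_t dev, struct resource *irq, void **cookiep)
	{
		return (bus_setup_intr(dev, irq, INTR_TYPE_MISC | INTR_MPSAFE,
		    NULL, foo_intr, dev, cookiep));
	}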
* The value of @p _irq must be the interrupt resource passed * to a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt */ METHOD int suspend_intr { device_t _dev; device_t _child; struct resource *_irq; } DEFAULT bus_generic_suspend_intr; /** * @brief Resume an interrupt handler * * This method is used to clear suspended state of a handler when * the associated device is powered up and can be an interrupt source * again. * The value of @p _irq must be the interrupt resource passed * to a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt */ METHOD int resume_intr { device_t _dev; device_t _child; struct resource *_irq; } DEFAULT bus_generic_resume_intr; /** * @brief Define a resource which can be allocated with * BUS_ALLOC_RESOURCE(). * * This method is used by some buses (typically ISA) to allow a * driver to describe a resource range that it would like to * allocate. The resource defined by @p _type and @p _rid is defined * to start at @p _start and to include @p _count indices in its * range. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier * @param _start the start of the resource range * @param _count the size of the resource range */ METHOD int set_resource { device_t _dev; device_t _child; int _type; int _rid; rman_res_t _start; rman_res_t _count; }; /** * @brief Describe a resource * * This method allows a driver to examine the range used for a given * resource without actually allocating it. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier * @param _start the address of a location to receive the start * index of the resource range * @param _count the address of a location to receive the size * of the resource range */ METHOD int get_resource { device_t _dev; device_t _child; int _type; int _rid; rman_res_t *_startp; rman_res_t *_countp; }; /** * @brief Delete a resource. * * Use this to delete a resource (possibly one previously added with * BUS_SET_RESOURCE()). * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier */ METHOD void delete_resource { device_t _dev; device_t _child; int _type; int _rid; }; /** * @brief Return a struct resource_list. * * Used by drivers which use bus_generic_rl_alloc_resource() etc. to * implement their resource handling. It should return the resource * list of the given child device. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource list */ METHOD struct resource_list * get_resource_list { device_t _dev; device_t _child; } DEFAULT bus_generic_get_resource_list; /** * @brief Return a struct rman. * * Used by drivers which use bus_generic_rman_alloc_resource() etc. to * implement their resource handling. It should return the resource * manager used for the given resource type. 
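SET_RESOURCE/GET_RESOURCE above are hint-style bookkeeping rather than allocation. A sketch of their wrappers, with an ISA-like port range that is purely illustrative:

	static void
	foo_wire(device_t child)
	{
		rman_res_t start, count;

		bus_set_resource(child, SYS_RES_IOPORT, 0, 0x3f8, 8);
		if (bus_get_resource(child, SYS_RES_IOPORT, 0, &start,
		    &count) == 0)
			device_printf(child, "wired to 0x%jx-0x%jx\n",
			    (uintmax_t)start, (uintmax_t)(start + count - 1));
	}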
* * @param _dev the bus device * @param _type the resource type * @param _flags resource flags (@c RF_XXX flags in * ) */ METHOD struct rman * get_rman { device_t _dev; int _type; u_int _flags; } DEFAULT null_get_rman; /** * @brief Is the hardware described by @p _child still attached to the * system? * * This method should return 0 if the device is not present. It * should return -1 if it is present. Any errors in determining * should be returned as a normal errno value. Client drivers are to * assume that the device is present, even if there is an error * determining if it is there. Buses are to try to avoid returning * errors, but newcard will return an error if the device fails to * implement this method. * * @param _dev the parent device of @p _child * @param _child the device which is being examined */ METHOD int child_present { device_t _dev; device_t _child; } DEFAULT bus_generic_child_present; /** * @brief Returns the pnp info for this device. * * Return it as a string, appended to @p _sb * * The string must be formatted as a space-separated list of * name=value pairs. Names may only contain alphanumeric characters, * underscores ('_') and hyphens ('-'). Values can contain any * non-whitespace characters. Values containing whitespace can be * quoted with double quotes ('"'). Double quotes and backslashes in * quoted values can be escaped with backslashes ('\'). * * @param _dev the parent device of @p _child * @param _child the device which is being examined * @param _sb sbuf for results string */ METHOD int child_pnpinfo { device_t _dev; device_t _child; struct sbuf *_sb; } DEFAULT bus_generic_child_pnpinfo; /** * @brief Returns the location for this device. * * Return it as a string, appended to @p _sb * * The string must be formatted as a space-separated list of * name=value pairs. Names may only contain alphanumeric characters, * underscores ('_') and hyphens ('-'). Values can contain any * non-whitespace characters. Values containing whitespace can be * quoted with double quotes ('"'). Double quotes and backslashes in * quoted values can be escaped with backslashes ('\'). * * @param _dev the parent device of @p _child * @param _child the device which is being examined * @param _sb sbuf for results string */ METHOD int child_location { device_t _dev; device_t _child; struct sbuf *_sb; } DEFAULT bus_generic_child_location; /** * @brief Allow drivers to request that an interrupt be bound to a specific * CPU. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cpu the CPU to bind the interrupt to */ METHOD int bind_intr { device_t _dev; device_t _child; struct resource *_irq; int _cpu; } DEFAULT bus_generic_bind_intr; /** * @brief Allow (bus) drivers to specify the trigger mode and polarity * of the specified interrupt. * * @param _dev the bus device * @param _irq the interrupt number to modify * @param _trig the trigger mode required * @param _pol the interrupt polarity required */ METHOD int config_intr { device_t _dev; int _irq; enum intr_trigger _trig; enum intr_polarity _pol; } DEFAULT bus_generic_config_intr; /** * @brief Allow drivers to associate a description with an active * interrupt handler. 
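The GET_RMAN method declared above is what lets the bus_generic_rman_*() helpers (including the adjust path this change rewires) find the right resource manager. A sketch modeled on handlers such as vmd_get_rman(); the softc layout is hypothetical:

	struct mybus_softc {
		struct rman io_rman;	/* hypothetical port rman */
		struct rman mem_rman;	/* hypothetical memory rman */
	};

	static struct rman *
	mybus_get_rman(device_t dev, int type, u_int flags)
	{
		struct mybus_softc *sc = device_get_softc(dev);

		switch (type) {
		case SYS_RES_IOPORT:
			return (&sc->io_rman);
		case SYS_RES_MEMORY:
			return (&sc->mem_rman);
		default:
			return (NULL);
		}
	}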
* * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cookie the cookie value returned when the interrupt * was originally registered * @param _descr the description to associate with the interrupt */ METHOD int describe_intr { device_t _dev; device_t _child; struct resource *_irq; void *_cookie; const char *_descr; } DEFAULT bus_generic_describe_intr; /** * @brief Notify a (bus) driver about a child that the hints mechanism * believes it has discovered. * * The bus is responsible for then adding the child in the right order * and discovering other things about the child. The bus driver is * free to ignore this hint, to do special things, etc. It is all up * to the bus driver to interpret. * * This method is only called in response to the parent bus asking for * hinted devices to be enumerated. * * @param _dev the bus device * @param _dname the name of the device w/o unit numbers * @param _dunit the unit number of the device */ METHOD void hinted_child { device_t _dev; const char *_dname; int _dunit; }; /** * @brief Returns bus_dma_tag_t for use w/ devices on the bus. * * @param _dev the parent device of @p _child * @param _child the device to which the tag will belong */ METHOD bus_dma_tag_t get_dma_tag { device_t _dev; device_t _child; } DEFAULT bus_generic_get_dma_tag; /** * @brief Returns bus_space_tag_t for use w/ devices on the bus. * * @param _dev the parent device of @p _child * @param _child the device to which the tag will belong */ METHOD bus_space_tag_t get_bus_tag { device_t _dev; device_t _child; } DEFAULT bus_generic_get_bus_tag; /** * @brief Allow the bus to determine the unit number of a device. * * @param _dev the parent device of @p _child * @param _child the device whose unit is to be wired * @param _name the name of the device's new devclass * @param _unitp a pointer to the device's new unit value */ METHOD void hint_device_unit { device_t _dev; device_t _child; const char *_name; int *_unitp; }; /** * @brief Notify a bus that the bus pass level has been changed * * @param _dev the bus device */ METHOD void new_pass { device_t _dev; } DEFAULT bus_generic_new_pass; /** * @brief Notify a bus that specified child's IRQ should be remapped. * * @param _dev the bus device * @param _child the child device * @param _irq the irq number */ METHOD int remap_intr { device_t _dev; device_t _child; u_int _irq; } DEFAULT null_remap_intr; /** * @brief Suspend a given child * * @param _dev the parent device of @p _child * @param _child the device to suspend */ METHOD int suspend_child { device_t _dev; device_t _child; } DEFAULT bus_generic_suspend_child; /** * @brief Resume a given child * * @param _dev the parent device of @p _child * @param _child the device to resume */ METHOD int resume_child { device_t _dev; device_t _child; } DEFAULT bus_generic_resume_child; /** * @brief Get the VM domain handle for the given bus and child. 
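BIND_INTR and DESCRIBE_INTR, documented above, are typically used together once a handler is established. A sketch, assuming irq_res and cookie came from an earlier bus_setup_intr() call and that the "rx" tag is illustrative:

	static void
	foo_label_irq(device_t dev, struct resource *irq_res, void *cookie)
	{
		/* Pin the handler to CPU 0 and name it for vmstat -i. */
		(void)bus_bind_intr(dev, irq_res, 0);
		(void)bus_describe_intr(dev, irq_res, cookie, "rx");
	}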
* * @param _dev the bus device * @param _child the child device * @param _domain a pointer to the bus's domain handle identifier */ METHOD int get_domain { device_t _dev; device_t _child; int *_domain; } DEFAULT bus_generic_get_domain; /** * @brief Request a set of CPUs * * @param _dev the bus device * @param _child the child device * @param _op type of CPUs to request * @param _setsize the size of the set passed in _cpuset * @param _cpuset a pointer to a cpuset to receive the requested * set of CPUs */ METHOD int get_cpus { device_t _dev; device_t _child; enum cpu_sets _op; size_t _setsize; struct _cpuset *_cpuset; } DEFAULT bus_generic_get_cpus; /** * @brief Prepares the given child of the bus for reset * * Typically the bus detaches or suspends the children's drivers, and then * calls this method to save bus-specific information, for instance, * PCI config space, which is damaged by reset. * * The bus_helper_reset_prepare() helper is provided to ease * implementing bus reset methods. * * @param _dev the bus device * @param _child the child device */ METHOD int reset_prepare { device_t _dev; device_t _child; } DEFAULT null_reset_prepare; /** * @brief Restores the child operations after the reset * * The bus_helper_reset_post() helper is provided to ease * implementing bus reset methods. * * @param _dev the bus device * @param _child the child device */ METHOD int reset_post { device_t _dev; device_t _child; } DEFAULT null_reset_post; /** * @brief Performs reset of the child * * @param _dev the bus device * @param _child the child device * @param _flags DEVF_RESET_ flags */ METHOD int reset_child { device_t _dev; device_t _child; int _flags; }; /** * @brief Gets a child's specific property * * The bus_get_property() method can be used to access device * specific properties stored on the bus. If _propvalue * is NULL or _size is 0, then the method only returns the size * of the property. * * @param _dev the bus device * @param _child the child device * @param _propname property name * @param _propvalue property value destination * @param _size property value size * * @returns size of property if successful otherwise -1 */ METHOD ssize_t get_property { device_t _dev; device_t _child; const char *_propname; void *_propvalue; size_t _size; device_property_type_t type; } DEFAULT bus_generic_get_property; /** * @brief Gets a child's full path to the device * * The get_device_path method retrieves a device's * full path to the device using one of several * locators present in the system. * * @param _bus the bus device * @param _child the child device * @param _locator locator name * @param _sb buffer for the location string */ METHOD int get_device_path { device_t _bus; device_t _child; const char *_locator; struct sbuf *_sb; } DEFAULT bus_generic_get_device_path; diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 25cb5fba2108..33d7b1e4af88 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -1,6113 +1,6112 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_bus.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); static bool disable_failed_devs = false; SYSCTL_BOOL(_hw_bus, OID_AUTO, disable_failed_devices, CTLFLAG_RWTUN, &disable_failed_devs, 0, "Do not retry attaching devices that return an error from DEVICE_ATTACH the first time"); /* * Used to attach drivers to devclasses. */ typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ int pass; int flags; #define DL_DEFERRED_PROBE 1 /* Probe deferred on this */ TAILQ_ENTRY(driverlink) passlink; }; /* * Forward declarations */ typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; typedef TAILQ_HEAD(device_list, _device) device_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ int flags; #define DC_HAS_CHILDREN 1 struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; /** * @brief Implementation of _device. * * The structure is named "_device" instead of "device" to avoid type confusion * caused by other subsystems defining a (struct device). */ struct _device { /* * A device is a kernel object. The first field must be the * current ops table for the object. */ KOBJ_FIELDS; /* * Device hierarchy. */ TAILQ_ENTRY(_device) link; /**< list of devices in parent */ TAILQ_ENTRY(_device) devlink; /**< global device list membership */ device_t parent; /**< parent of this device */ device_list_t children; /**< list of child devices */ /* * Details of this device. */ driver_t *driver; /**< current driver */ devclass_t devclass; /**< current device class */ int unit; /**< current unit number */ char* nameunit; /**< name+unit e.g. 
foodev0 */ char* desc; /**< driver specific description */ u_int busy; /**< count of calls to device_busy() */ device_state_t state; /**< current device state */ uint32_t devflags; /**< api level flags for device_get_flags() */ u_int flags; /**< internal device flags */ u_int order; /**< order from device_add_child_ordered() */ void *ivars; /**< instance variables */ void *softc; /**< current driver's variables */ struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */ struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */ }; static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures"); static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc"); EVENTHANDLER_LIST_DEFINE(device_attach); EVENTHANDLER_LIST_DEFINE(device_detach); EVENTHANDLER_LIST_DEFINE(device_nomatch); EVENTHANDLER_LIST_DEFINE(dev_lookup); static void devctl2_init(void); static bool device_frozen; #define DRIVERNAME(d) ((d)? d->name : "no driver") #define DEVCLANAME(d) ((d)? d->name : "no devclass") #ifdef BUS_DEBUG static int bus_debug = 1; SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RWTUN, &bus_debug, 0, "Bus debug level"); #define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");} #define DEVICENAME(d) ((d)? device_get_name(d): "no device") /** * Produce the indenting, indent*2 spaces plus a '.' ahead of that to * prevent syslog from deleting initial spaces */ #define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJparent ? dc->parent->name : ""; break; default: return (EINVAL); } return (SYSCTL_OUT_STR(req, value)); } static void devclass_sysctl_init(devclass_t dc) { if (dc->sysctl_tree != NULL) return; sysctl_ctx_init(&dc->sysctl_ctx); dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A", "parent class"); } enum { DEVICE_SYSCTL_DESC, DEVICE_SYSCTL_DRIVER, DEVICE_SYSCTL_LOCATION, DEVICE_SYSCTL_PNPINFO, DEVICE_SYSCTL_PARENT, }; static int device_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct sbuf sb; device_t dev = (device_t)arg1; int error; sbuf_new_for_sysctl(&sb, NULL, 1024, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); bus_topo_lock(); switch (arg2) { case DEVICE_SYSCTL_DESC: sbuf_cat(&sb, dev->desc ? dev->desc : ""); break; case DEVICE_SYSCTL_DRIVER: sbuf_cat(&sb, dev->driver ? dev->driver->name : ""); break; case DEVICE_SYSCTL_LOCATION: bus_child_location(dev, &sb); break; case DEVICE_SYSCTL_PNPINFO: bus_child_pnpinfo(dev, &sb); break; case DEVICE_SYSCTL_PARENT: sbuf_cat(&sb, dev->parent ? 
dev->parent->nameunit : ""); break; default: error = EINVAL; goto out; } error = sbuf_finish(&sb); out: bus_topo_unlock(); sbuf_delete(&sb); return (error); } static void device_sysctl_init(device_t dev) { devclass_t dc = dev->devclass; int domain; if (dev->sysctl_tree != NULL) return; devclass_sysctl_init(dc); sysctl_ctx_init(&dev->sysctl_ctx); dev->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&dev->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, dev->nameunit + strlen(dc->name), CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "", "device_index"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%desc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A", "device description"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%driver", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A", "device driver name"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%location", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A", "device location relative to parent"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%pnpinfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A", "device identification"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A", "parent device"); if (bus_get_domain(dev, &domain) == 0) SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, domain, "NUMA domain"); } static void device_sysctl_update(device_t dev) { devclass_t dc = dev->devclass; if (dev->sysctl_tree == NULL) return; sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name)); } static void device_sysctl_fini(device_t dev) { if (dev->sysctl_tree == NULL) return; sysctl_ctx_free(&dev->sysctl_ctx); dev->sysctl_tree = NULL; } static struct device_list bus_data_devices; static int bus_data_generation = 1; static kobj_method_t null_methods[] = { KOBJMETHOD_END }; DEFINE_CLASS(null, null_methods, 0); void bus_topo_assert(void) { GIANT_REQUIRED; } struct mtx * bus_topo_mtx(void) { return (&Giant); } void bus_topo_lock(void) { mtx_lock(bus_topo_mtx()); } void bus_topo_unlock(void) { mtx_unlock(bus_topo_mtx()); } /* * Bus pass implementation */ static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes); int bus_current_pass = BUS_PASS_ROOT; /** * @internal * @brief Register the pass level of a new driver attachment * * Register a new driver attachment's pass level. If no driver * attachment with the same pass level has been added, then @p new * will be added to the global passes list. * * @param new the new driver attachment */ static void driver_register_pass(struct driverlink *new) { struct driverlink *dl; /* We only consider pass numbers during boot. */ if (bus_current_pass == BUS_PASS_DEFAULT) return; /* * Walk the passes list. If we already know about this pass * then there is nothing to do. If we don't, then insert this * driver link into the list. 
*/ TAILQ_FOREACH(dl, &passes, passlink) { if (dl->pass < new->pass) continue; if (dl->pass == new->pass) return; TAILQ_INSERT_BEFORE(dl, new, passlink); return; } TAILQ_INSERT_TAIL(&passes, new, passlink); } /** * @brief Raise the current bus pass * * Raise the current bus pass level to @p pass. Call the BUS_NEW_PASS() * method on the root bus to kick off a new device tree scan for each * new pass level that has at least one driver. */ void bus_set_pass(int pass) { struct driverlink *dl; if (bus_current_pass > pass) panic("Attempt to lower bus pass level"); TAILQ_FOREACH(dl, &passes, passlink) { /* Skip pass values below the current pass level. */ if (dl->pass <= bus_current_pass) continue; /* * Bail once we hit a driver with a pass level that is * too high. */ if (dl->pass > pass) break; /* * Raise the pass level to the next level and rescan * the tree. */ bus_current_pass = dl->pass; BUS_NEW_PASS(root_bus); } /* * If there isn't a driver registered for the requested pass, * then bus_current_pass might still be less than 'pass'. Set * it to 'pass' in that case. */ if (bus_current_pass < pass) bus_current_pass = pass; KASSERT(bus_current_pass == pass, ("Failed to update bus pass level")); } /* * Devclass implementation */ static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses); /** * @internal * @brief Find or create a device class * * If a device class with the name @p classname exists, return it, * otherwise if @p create is non-zero create and return a new device * class. * * If @p parentname is non-NULL, the parent of the devclass is set to * the devclass of that name. * * @param classname the devclass name to find or create * @param parentname the parent devclass name or @c NULL * @param create non-zero to create a devclass */ static devclass_t devclass_find_internal(const char *classname, const char *parentname, int create) { devclass_t dc; PDEBUG(("looking for %s", classname)); if (!classname) return (NULL); TAILQ_FOREACH(dc, &devclasses, link) { if (!strcmp(dc->name, classname)) break; } if (create && !dc) { PDEBUG(("creating %s", classname)); dc = malloc(sizeof(struct devclass) + strlen(classname) + 1, M_BUS, M_NOWAIT | M_ZERO); if (!dc) return (NULL); dc->parent = NULL; dc->name = (char*) (dc + 1); strcpy(dc->name, classname); TAILQ_INIT(&dc->drivers); TAILQ_INSERT_TAIL(&devclasses, dc, link); bus_data_generation_update(); } /* * If a parent class is specified, then set that as our parent so * that this devclass will support drivers for the parent class as * well. If the parent class has the same name don't do this though * as it creates a cycle that can trigger an infinite loop in * device_probe_child() if a device exists for which there is no * suitable driver. */ if (parentname && dc && !dc->parent && strcmp(classname, parentname) != 0) { dc->parent = devclass_find_internal(parentname, NULL, TRUE); dc->parent->flags |= DC_HAS_CHILDREN; } return (dc); } /** * @brief Create a device class * * If a device class with the name @p classname exists, return it, * otherwise create and return a new device class. * * @param classname the devclass name to find or create */ devclass_t devclass_create(const char *classname) { return (devclass_find_internal(classname, NULL, TRUE)); } /** * @brief Find a device class * * If a device class with the name @p classname exists, return it, * otherwise return @c NULL. 
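A sketch of the lookup side of the devclass machinery above; "uart" is just an example of a class that typically exists:

	static void
	foo_find_uart0(void)
	{
		devclass_t dc;
		device_t dev0;

		/* Look up without creating, per devclass_find(). */
		dc = devclass_find("uart");
		if (dc != NULL && (dev0 = devclass_get_device(dc, 0)) != NULL)
			device_printf(dev0, "located via devclass_find()\n");
	}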
* * @param classname the devclass name to find */ devclass_t devclass_find(const char *classname) { return (devclass_find_internal(classname, NULL, FALSE)); } /** * @brief Register that a device driver has been added to a devclass * * Register that a device driver has been added to a devclass. This * is called by devclass_add_driver to accomplish the recursive * notification of all the children classes of dc, as well as dc. * Each layer will have BUS_DRIVER_ADDED() called for all instances of * the devclass. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param dc the devclass to edit * @param driver the driver that was just added */ static void devclass_driver_added(devclass_t dc, driver_t *driver) { devclass_t parent; int i; /* * Call BUS_DRIVER_ADDED for any existing buses in this class. */ for (i = 0; i < dc->maxunit; i++) if (dc->devices[i] && device_is_attached(dc->devices[i])) BUS_DRIVER_ADDED(dc->devices[i], driver); /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(dc->flags & DC_HAS_CHILDREN)) return; parent = dc; TAILQ_FOREACH(dc, &devclasses, link) { if (dc->parent == parent) devclass_driver_added(dc, driver); } } static void device_handle_nomatch(device_t dev) { BUS_PROBE_NOMATCH(dev->parent, dev); EVENTHANDLER_DIRECT_INVOKE(device_nomatch, dev); dev->flags |= DF_DONENOMATCH; } /** * @brief Add a device driver to a device class * * Add a device driver to a devclass. This is normally called * automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of * all devices in the devclass will be called to allow them to attempt * to re-probe any unmatched children. * * @param dc the devclass to edit * @param driver the driver to register */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp) { driverlink_t dl; devclass_t child_dc; const char *parentname; PDEBUG(("%s", DRIVERNAME(driver))); /* Don't allow invalid pass values. */ if (pass <= BUS_PASS_ROOT) return (EINVAL); dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO); if (!dl) return (ENOMEM); /* * Compile the driver's methods. Also increase the reference count * so that the class doesn't get freed when the last instance * goes. This means we can safely use static methods and avoids a * double-free in devclass_delete_driver. */ kobj_class_compile((kobj_class_t) driver); /* * If the driver has any base classes, make the * devclass inherit from the devclass of the driver's * first base class. This will allow the system to * search for drivers in both devclasses for children * of a device using this driver. 
*/ if (driver->baseclasses) parentname = driver->baseclasses[0]->name; else parentname = NULL; child_dc = devclass_find_internal(driver->name, parentname, TRUE); if (dcp != NULL) *dcp = child_dc; dl->driver = driver; TAILQ_INSERT_TAIL(&dc->drivers, dl, link); driver->refs++; /* XXX: kobj_mtx */ dl->pass = pass; driver_register_pass(dl); if (device_frozen) { dl->flags |= DL_DEFERRED_PROBE; } else { devclass_driver_added(dc, driver); } bus_data_generation_update(); return (0); } /** * @brief Register that a device driver has been deleted from a devclass * * Register that a device driver has been removed from a devclass. * This is called by devclass_delete_driver to accomplish the * recursive notification of all the children classes of busclass, as * well as busclass. Each layer will attempt to detach the driver * from any devices that are children of the bus's devclass. The function * will return an error if a device fails to detach. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param busclass the devclass of the parent bus * @param dc the devclass of the driver being deleted * @param driver the driver being deleted */ static int devclass_driver_deleted(devclass_t busclass, devclass_t dc, driver_t *driver) { devclass_t parent; device_t dev; int error, i; /* * Disassociate from any devices. We iterate through all the * devices in the devclass of the driver and detach any which are * using the driver and which have a parent in the devclass which * we are deleting from. * * Note that since a driver can be in multiple devclasses, we * should not detach devices which are not children of devices in * the affected devclass. * * If we're frozen, we don't generate NOMATCH events. Mark to * generate later. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_detach(dev)) != 0) return (error); if (device_frozen) { dev->flags &= ~DF_DONENOMATCH; dev->flags |= DF_NEEDNOMATCH; } else { device_handle_nomatch(dev); } } } } /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(busclass->flags & DC_HAS_CHILDREN)) return (0); parent = busclass; TAILQ_FOREACH(busclass, &devclasses, link) { if (busclass->parent == parent) { error = devclass_driver_deleted(busclass, dc, driver); if (error) return (error); } } return (0); } /** * @brief Delete a device driver from a device class * * Delete a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_delete_driver() will first attempt to detach from each * device. If one of the detach calls fails, the driver will not be * deleted. * * @param dc the devclass to edit * @param driver the driver to unregister */ int devclass_delete_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. 
*/ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } error = devclass_driver_deleted(busclass, dc, driver); if (error != 0) return (error); TAILQ_REMOVE(&busclass->drivers, dl, link); free(dl, M_BUS); /* XXX: kobj_mtx */ driver->refs--; if (driver->refs == 0) kobj_class_free((kobj_class_t) driver); bus_data_generation_update(); return (0); } /** * @brief Quiesces a set of device drivers from a device class * * Quiesce a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_quiesce_driver() will first attempt to quiesce each * device. * * @param dc the devclass to edit * @param driver the driver to unregister */ static int devclass_quiesce_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; device_t dev; int i; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } /* * Quiesce all devices. We iterate through all the devices in * the devclass of the driver and quiesce any which are using * the driver and which have a parent in the devclass which we * are quiescing. * * Note that since a driver can be in multiple devclasses, we * should not quiesce devices which are not children of * devices in the affected devclass. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_quiesce(dev)) != 0) return (error); } } } return (0); } /** * @internal */ static driverlink_t devclass_find_driver_internal(devclass_t dc, const char *classname) { driverlink_t dl; PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc))); TAILQ_FOREACH(dl, &dc->drivers, link) { if (!strcmp(dl->driver->name, classname)) return (dl); } PDEBUG(("not found")); return (NULL); } /** * @brief Return the name of the devclass */ const char * devclass_get_name(devclass_t dc) { return (dc->name); } /** * @brief Find a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t devclass_get_device(devclass_t dc, int unit) { if (dc == NULL || unit < 0 || unit >= dc->maxunit) return (NULL); return (dc->devices[unit]); } /** * @brief Find the softc field of a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the softc field of the device with the given * unit number or @c NULL if there is no such * device */ void * devclass_get_softc(devclass_t dc, int unit) { device_t dev; dev = devclass_get_device(dc, unit); if (!dev) return (NULL); return (device_get_softc(dev)); } /** * @brief Get a list of devices in the devclass * * An array containing a list of all the devices in the given devclass * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP), even if @p *devcountp is 0.
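A sketch of the devclass_get_devices() ownership contract just stated, with hypothetical foo_ naming: the caller frees the returned array with free(..., M_TEMP) even when it is empty:

	static void
	foo_walk_class(devclass_t dc)
	{
		device_t *devs;
		int count, i;

		if (devclass_get_devices(dc, &devs, &count) != 0)
			return;
		for (i = 0; i < count; i++)
			device_printf(devs[i], "unit %d\n",
			    device_get_unit(devs[i]));
		free(devs, M_TEMP);	/* required even when count == 0 */
	}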
* * @param dc the devclass to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp) { int count, i; device_t *list; count = devclass_get_count(dc); list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { list[count] = dc->devices[i]; count++; } } *devlistp = list; *devcountp = count; return (0); } /** * @brief Get a list of drivers in the devclass * * An array containing a list of pointers to all the drivers in the * given devclass is allocated and returned in @p *listp. The number * of drivers in the array is returned in @p *countp. The caller should * free the array using @c free(p, M_TEMP). * * @param dc the devclass to examine * @param listp gives location for array pointer return value * @param countp gives location for number of array elements * return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp) { driverlink_t dl; driver_t **list; int count; count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) count++; list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT); if (list == NULL) return (ENOMEM); count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) { list[count] = dl->driver; count++; } *listp = list; *countp = count; return (0); } /** * @brief Get the number of devices in a devclass * * @param dc the devclass to examine */ int devclass_get_count(devclass_t dc) { int count, i; count = 0; for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) count++; return (count); } /** * @brief Get the maximum unit number used in a devclass * * Note that this is one greater than the highest currently-allocated * unit. If a null devclass_t is passed in, -1 is returned to indicate * that not even the devclass has been allocated yet. * * @param dc the devclass to examine */ int devclass_get_maxunit(devclass_t dc) { if (dc == NULL) return (-1); return (dc->maxunit); } /** * @brief Find a free unit number in a devclass * * This function searches for the first unused unit number greater * than or equal to @p unit. * * @param dc the devclass to examine * @param unit the first unit number to check */ int devclass_find_free_unit(devclass_t dc, int unit) { if (dc == NULL) return (unit); while (unit < dc->maxunit && dc->devices[unit] != NULL) unit++; return (unit); } /** * @brief Set the parent of a devclass * * The parent class is normally initialised automatically by * DRIVER_MODULE(). * * @param dc the devclass to edit * @param pdc the new parent devclass */ void devclass_set_parent(devclass_t dc, devclass_t pdc) { dc->parent = pdc; } /** * @brief Get the parent of a devclass * * @param dc the devclass to examine */ devclass_t devclass_get_parent(devclass_t dc) { return (dc->parent); } struct sysctl_ctx_list * devclass_get_sysctl_ctx(devclass_t dc) { return (&dc->sysctl_ctx); } struct sysctl_oid * devclass_get_sysctl_tree(devclass_t dc) { return (dc->sysctl_tree); } /** * @internal * @brief Allocate a unit number * * On entry, @p *unitp is the desired unit number (or @c -1 if any * will do). The allocated unit number is returned in @p *unitp.
* @param dc the devclass to allocate from * @param unitp points at the location for the allocated unit * number * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp) { const char *s; int unit = *unitp; PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc))); /* Ask the parent bus if it wants to wire this device. */ if (unit == -1) BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name, &unit); /* If we were given a wired unit number, check for existing device */ /* XXX imp XXX */ if (unit != -1) { if (unit >= 0 && unit < dc->maxunit && dc->devices[unit] != NULL) { if (bootverbose) printf("%s: %s%d already exists; skipping it\n", dc->name, dc->name, *unitp); return (EEXIST); } } else { /* Unwired device, find the next available slot for it */ unit = 0; for (unit = 0;; unit++) { /* If this device slot is already in use, skip it. */ if (unit < dc->maxunit && dc->devices[unit] != NULL) continue; /* If there is an "at" hint for a unit then skip it. */ if (resource_string_value(dc->name, unit, "at", &s) == 0) continue; break; } } /* * We've selected a unit beyond the length of the table, so let's * extend the table to make room for all units up to and including * this one. */ if (unit >= dc->maxunit) { device_t *newlist, *oldlist; int newsize; oldlist = dc->devices; newsize = roundup((unit + 1), MAX(1, MINALLOCSIZE / sizeof(device_t))); newlist = malloc(sizeof(device_t) * newsize, M_BUS, M_NOWAIT); if (!newlist) return (ENOMEM); if (oldlist != NULL) bcopy(oldlist, newlist, sizeof(device_t) * dc->maxunit); bzero(newlist + dc->maxunit, sizeof(device_t) * (newsize - dc->maxunit)); dc->devices = newlist; dc->maxunit = newsize; if (oldlist != NULL) free(oldlist, M_BUS); } PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc))); *unitp = unit; return (0); } /** * @internal * @brief Add a device to a devclass * * A unit number is allocated for the device (using the device's * preferred unit number if any) and the device is registered in the * devclass. This allows the device to be looked up by its unit * number, e.g. by decoding a dev_t minor number. * * @param dc the devclass to add to * @param dev the device to add * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_add_device(devclass_t dc, device_t dev) { int buflen, error; PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX); if (buflen < 0) return (ENOMEM); dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO); if (!dev->nameunit) return (ENOMEM); if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) { free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (error); } dc->devices[dev->unit] = dev; dev->devclass = dc; snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit); return (0); } /** * @internal * @brief Delete a device from a devclass * * The device is removed from the devclass's device list and its unit * number is freed. 
* @param dc the devclass to delete from * @param dev the device to delete * * @retval 0 success */ static int devclass_delete_device(devclass_t dc, device_t dev) { if (!dc || !dev) return (0); PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); if (dev->devclass != dc || dc->devices[dev->unit] != dev) panic("devclass_delete_device: inconsistent device class"); dc->devices[dev->unit] = NULL; if (dev->flags & DF_WILDCARD) dev->unit = -1; dev->devclass = NULL; free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (0); } /** * @internal * @brief Make a new device and add it as a child of @p parent * * @param parent the parent of the new device * @param name the devclass name of the new device or @c NULL * to leave the devclass unspecified * @param unit the unit number of the new device or @c -1 to * leave the unit number unspecified * * @returns the new device */ static device_t make_device(device_t parent, const char *name, int unit) { device_t dev; devclass_t dc; PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit)); if (name) { dc = devclass_find_internal(name, NULL, TRUE); if (!dc) { printf("make_device: can't find device class %s\n", name); return (NULL); } } else { dc = NULL; } dev = malloc(sizeof(*dev), M_BUS, M_NOWAIT|M_ZERO); if (!dev) return (NULL); dev->parent = parent; TAILQ_INIT(&dev->children); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; dev->devclass = NULL; dev->unit = unit; dev->nameunit = NULL; dev->desc = NULL; dev->busy = 0; dev->devflags = 0; dev->flags = DF_ENABLED; dev->order = 0; if (unit == -1) dev->flags |= DF_WILDCARD; if (name) { dev->flags |= DF_FIXEDCLASS; if (devclass_add_device(dc, dev)) { kobj_delete((kobj_t) dev, M_BUS); return (NULL); } } if (parent != NULL && device_has_quiet_children(parent)) dev->flags |= DF_QUIET | DF_QUIET_CHILDREN; dev->ivars = NULL; dev->softc = NULL; dev->state = DS_NOTPRESENT; TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink); bus_data_generation_update(); return (dev); } /** * @internal * @brief Print a description of a device. */ static int device_print_child(device_t dev, device_t child) { int retval = 0; if (device_is_alive(child)) retval += BUS_PRINT_CHILD(dev, child); else retval += device_printf(child, " not found\n"); return (retval); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with order zero. * * @param dev the device which will be the parent of the * new child device * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child(device_t dev, const char *name, int unit) { return (device_add_child_ordered(dev, 0, name, unit)); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with the same order.
* * @param dev the device which will be the parent of the * new child device * @param order a value which is used to partially sort the * children of @p dev - devices created using * lower values of @p order appear first in @p * dev's list of children * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit) { device_t child; device_t place; PDEBUG(("%s at %s with order %u as unit %d", name, DEVICENAME(dev), order, unit)); KASSERT(name != NULL || unit == -1, ("child device with wildcard name and specific unit number")); child = make_device(dev, name, unit); if (child == NULL) return (child); child->order = order; TAILQ_FOREACH(place, &dev->children, link) { if (place->order > order) break; } if (place) { /* * The device 'place' is the first device whose order is * greater than the new child. */ TAILQ_INSERT_BEFORE(place, child, link); } else { /* * The new child's order is greater or equal to the order of * any existing device. Add the child to the tail of the list. */ TAILQ_INSERT_TAIL(&dev->children, child, link); } bus_data_generation_update(); return (child); } /** * @brief Delete a device * * This function deletes a device along with all of its children. If * the device currently has a driver attached to it, the device is * detached first using device_detach(). * * @param dev the parent device * @param child the device to delete * * @retval 0 success * @retval non-zero a unit error code describing the error */ int device_delete_child(device_t dev, device_t child) { int error; device_t grandchild; PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev))); /* detach parent before deleting children, if any */ if ((error = device_detach(child)) != 0) return (error); /* remove children second */ while ((grandchild = TAILQ_FIRST(&child->children)) != NULL) { error = device_delete_child(child, grandchild); if (error) return (error); } if (child->devclass) devclass_delete_device(child->devclass, child); if (child->parent) BUS_CHILD_DELETED(dev, child); TAILQ_REMOVE(&dev->children, child, link); TAILQ_REMOVE(&bus_data_devices, child, devlink); kobj_delete((kobj_t) child, M_BUS); bus_data_generation_update(); return (0); } /** * @brief Delete all children devices of the given device, if any. * * This function deletes all children devices of the given device, if * any, using the device_delete_child() function for each device it * finds. If a child device cannot be deleted, this function will * return an error code. * * @param dev the parent device * * @retval 0 success * @retval non-zero a device would not detach */ int device_delete_children(device_t dev) { device_t child; int error; PDEBUG(("Deleting all children of %s", DEVICENAME(dev))); error = 0; while ((child = TAILQ_FIRST(&dev->children)) != NULL) { error = device_delete_child(dev, child); if (error) { PDEBUG(("Failed deleting %s", DEVICENAME(child))); break; } } return (error); } /** * @brief Find a device given a unit number * * This is similar to devclass_get_devices() but only searches for * devices which have @p dev as a parent. * * @param dev the parent device to search * @param unit the unit number to search for. If the unit is -1, * return the first child of @p dev which has name * @p classname (that is, the one with the lowest unit.) 
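device_find_child() and device_add_child() combine into the usual DEVICE_IDENTIFY() idiom: add a child only when one is not already present. A sketch with a hypothetical "foo" devclass:

	static void
	foo_identify(driver_t *driver, device_t parent)
	{
		/* Wildcard unit (-1): first child of class "foo", if any. */
		if (device_find_child(parent, "foo", -1) == NULL)
			device_add_child(parent, "foo", -1);
	}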
* * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t device_find_child(device_t dev, const char *classname, int unit) { devclass_t dc; device_t child; dc = devclass_find(classname); if (!dc) return (NULL); if (unit != -1) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } else { for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } } return (NULL); } /** * @internal */ static driverlink_t first_matching_driver(devclass_t dc, device_t dev) { if (dev->devclass) return (devclass_find_driver_internal(dc, dev->devclass->name)); return (TAILQ_FIRST(&dc->drivers)); } /** * @internal */ static driverlink_t next_matching_driver(devclass_t dc, device_t dev, driverlink_t last) { if (dev->devclass) { driverlink_t dl; for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link)) if (!strcmp(dev->devclass->name, dl->driver->name)) return (dl); return (NULL); } return (TAILQ_NEXT(last, link)); } /** * @internal */ int device_probe_child(device_t dev, device_t child) { devclass_t dc; driverlink_t best = NULL; driverlink_t dl; int result, pri = 0; /* We should preserve the devclass (or lack of) set by the bus. */ int hasclass = (child->devclass != NULL); bus_topo_assert(); dc = dev->devclass; if (!dc) panic("device_probe_child: parent device has no devclass"); /* * If the state is already probed, then return. */ if (child->state == DS_ALIVE) return (0); for (; dc; dc = dc->parent) { for (dl = first_matching_driver(dc, child); dl; dl = next_matching_driver(dc, child, dl)) { /* If this driver's pass is too high, then ignore it. */ if (dl->pass > bus_current_pass) continue; PDEBUG(("Trying %s", DRIVERNAME(dl->driver))); result = device_set_driver(child, dl->driver); if (result == ENOMEM) return (result); else if (result != 0) continue; if (!hasclass) { if (device_set_devclass(child, dl->driver->name) != 0) { char const * devname = device_get_name(child); if (devname == NULL) devname = "(unknown)"; printf("driver bug: Unable to set " "devclass (class: %s " "devname: %s)\n", dl->driver->name, devname); (void)device_set_driver(child, NULL); continue; } } /* Fetch any flags for the device before probing. */ resource_int_value(dl->driver->name, child->unit, "flags", &child->devflags); result = DEVICE_PROBE(child); /* * If the driver returns SUCCESS, there can be * no higher match for this device. */ if (result == 0) { best = dl; pri = 0; break; } /* Reset flags and devclass before the next probe. */ child->devflags = 0; if (!hasclass) (void)device_set_devclass(child, NULL); /* * Reset DF_QUIET in case this driver doesn't * end up as the best driver. */ device_verbose(child); /* * Probes that return BUS_PROBE_NOWILDCARD or lower * only match on devices whose driver was explicitly * specified. */ if (result <= BUS_PROBE_NOWILDCARD && !(child->flags & DF_FIXEDCLASS)) { result = ENXIO; } /* * The driver returned an error so it * certainly doesn't match. */ if (result > 0) { (void)device_set_driver(child, NULL); continue; } /* * A priority lower than SUCCESS, remember the * best matching driver. Initialise the value * of pri for the first match. */ if (best == NULL || result > pri) { best = dl; pri = result; continue; } } /* * If we have an unambiguous match in this devclass, * don't look in the parent. 
*/ if (best && pri == 0) break; } if (best == NULL) return (ENXIO); /* * If we found a driver, change state and initialise the devclass. */ if (pri < 0) { /* Set the winning driver, devclass, and flags. */ result = device_set_driver(child, best->driver); if (result != 0) return (result); if (!child->devclass) { result = device_set_devclass(child, best->driver->name); if (result != 0) { (void)device_set_driver(child, NULL); return (result); } } resource_int_value(best->driver->name, child->unit, "flags", &child->devflags); /* * A bit bogus. Call the probe method again to make sure * that we have the right description. */ result = DEVICE_PROBE(child); if (result > 0) { if (!hasclass) (void)device_set_devclass(child, NULL); (void)device_set_driver(child, NULL); return (result); } } child->state = DS_ALIVE; bus_data_generation_update(); return (0); } /** * @brief Return the parent of a device */ device_t device_get_parent(device_t dev) { return (dev->parent); } /** * @brief Get a list of children of a device * * An array containing a list of all the children of the given device * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP). * * @param dev the device to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int device_get_children(device_t dev, device_t **devlistp, int *devcountp) { int count; device_t child; device_t *list; count = 0; TAILQ_FOREACH(child, &dev->children, link) { count++; } if (count == 0) { *devlistp = NULL; *devcountp = 0; return (0); } list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; TAILQ_FOREACH(child, &dev->children, link) { list[count] = child; count++; } *devlistp = list; *devcountp = count; return (0); } /** * @brief Return the current driver for the device or @c NULL if there * is no driver currently attached */ driver_t * device_get_driver(device_t dev) { return (dev->driver); } /** * @brief Return the current devclass for the device or @c NULL if * there is none. */ devclass_t device_get_devclass(device_t dev) { return (dev->devclass); } /** * @brief Return the name of the device's devclass or @c NULL if there * is none. */ const char * device_get_name(device_t dev) { if (dev != NULL && dev->devclass) return (devclass_get_name(dev->devclass)); return (NULL); } /** * @brief Return a string containing the device's devclass name * followed by an ascii representation of the device's unit number * (e.g. @c "foo2"). */ const char * device_get_nameunit(device_t dev) { return (dev->nameunit); } /** * @brief Return the device's unit number. 
*/ int device_get_unit(device_t dev) { return (dev->unit); } /** * @brief Return the device's description string */ const char * device_get_desc(device_t dev) { return (dev->desc); } /** * @brief Return the device's flags */ uint32_t device_get_flags(device_t dev) { return (dev->devflags); } struct sysctl_ctx_list * device_get_sysctl_ctx(device_t dev) { return (&dev->sysctl_ctx); } struct sysctl_oid * device_get_sysctl_tree(device_t dev) { return (dev->sysctl_tree); } /** * @brief Print the name of the device followed by a colon and a space * * @returns the number of characters printed */ int device_print_prettyname(device_t dev) { const char *name = device_get_name(dev); if (name == NULL) return (printf("unknown: ")); return (printf("%s%d: ", name, device_get_unit(dev))); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling vprintf() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_printf(device_t dev, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); sbuf_set_drain(&sb, sbuf_printf_drain, &retval); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); sbuf_delete(&sb); return (retval); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling log() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_log(device_t dev, int pri, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); log(pri, "%.*s", (int) sbuf_len(&sb), sbuf_data(&sb)); retval = sbuf_len(&sb); sbuf_delete(&sb); return (retval); } /** * @internal */ static void device_set_desc_internal(device_t dev, const char *desc, bool allocated) { if (dev->desc && (dev->flags & DF_DESCMALLOCED)) { free(dev->desc, M_BUS); dev->flags &= ~DF_DESCMALLOCED; dev->desc = NULL; } if (allocated && desc) dev->flags |= DF_DESCMALLOCED; dev->desc = __DECONST(char *, desc); bus_data_generation_update(); } /** * @brief Set the device's description * * The value of @c desc should be a string constant that will not * change (at least until the description is changed in a subsequent * call to device_set_desc() or device_set_desc_copy()). */ void device_set_desc(device_t dev, const char *desc) { device_set_desc_internal(dev, desc, false); } /** * @brief Set the device's description * * A printf-like version of device_set_desc(). */ void device_set_descf(device_t dev, const char *fmt, ...) { va_list ap; char *buf = NULL; va_start(ap, fmt); vasprintf(&buf, M_BUS, fmt, ap); va_end(ap); device_set_desc_internal(dev, buf, true); } /** * @brief Set the device's description * * The string pointed to by @c desc is copied. Use this function if * the device description is generated, (e.g. with sprintf()). 
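 *
 * A typical pattern (a sketch; the revision variable is illustrative):
 *
 * @code
 *	char buf[64];
 *
 *	snprintf(buf, sizeof(buf), "Frobozz controller rev %d", rev);
 *	device_set_desc_copy(dev, buf);
 * @endcode
 *
 * device_set_descf() collapses the format-and-copy steps into one call.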
*/ void device_set_desc_copy(device_t dev, const char *desc) { char *buf; buf = strdup_flags(desc, M_BUS, M_NOWAIT); device_set_desc_internal(dev, buf, true); } /** * @brief Set the device's flags */ void device_set_flags(device_t dev, uint32_t flags) { dev->devflags = flags; } /** * @brief Return the device's softc field * * The softc is allocated and zeroed when a driver is attached, based * on the size field of the driver. */ void * device_get_softc(device_t dev) { return (dev->softc); } /** * @brief Set the device's softc field * * Most drivers do not need to use this since the softc is allocated * automatically when the driver is attached. */ void device_set_softc(device_t dev, void *softc) { if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) free(dev->softc, M_BUS_SC); dev->softc = softc; if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Free claimed softc * * Most drivers do not need to use this since the softc is freed * automatically when the driver is detached. */ void device_free_softc(void *softc) { free(softc, M_BUS_SC); } /** * @brief Claim softc * * This function can be used to let the driver free the automatically * allocated softc using "device_free_softc()". This function is * useful when the driver is refcounting the softc and the softc * cannot be freed when the "device_detach" method is called. */ void device_claim_softc(device_t dev) { if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Get the device's ivars field * * The ivars field is used by the parent device to store per-device * state (e.g. the physical location of the device or a list of * resources). */ void * device_get_ivars(device_t dev) { KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)")); return (dev->ivars); } /** * @brief Set the device's ivars field */ void device_set_ivars(device_t dev, void * ivars) { KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)")); dev->ivars = ivars; } /** * @brief Return the device's state */ device_state_t device_get_state(device_t dev) { return (dev->state); } /** * @brief Set the DF_ENABLED flag for the device */ void device_enable(device_t dev) { dev->flags |= DF_ENABLED; } /** * @brief Clear the DF_ENABLED flag for the device */ void device_disable(device_t dev) { dev->flags &= ~DF_ENABLED; } /** * @brief Increment the busy counter for the device */ void device_busy(device_t dev) { /* * Mark the device as busy, recursively up the tree if this busy count * goes 0->1. */ if (refcount_acquire(&dev->busy) == 0 && dev->parent != NULL) device_busy(dev->parent); } /** * @brief Decrement the busy counter for the device */ void device_unbusy(device_t dev) { /* * Mark the device as unbusy, recursively up the tree if this drops the * last busy count. */ if (refcount_release(&dev->busy) && dev->parent != NULL) device_unbusy(dev->parent); } /** * @brief Set the DF_QUIET flag for the device */ void device_quiet(device_t dev) { dev->flags |= DF_QUIET; } /** * @brief Set the DF_QUIET_CHILDREN flag for the device */ void device_quiet_children(device_t dev) { dev->flags |= DF_QUIET_CHILDREN; } /** * @brief Clear the DF_QUIET flag for the device */ void device_verbose(device_t dev) { dev->flags &= ~DF_QUIET; } ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type) { device_t bus = device_get_parent(dev); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_HANDLE: /* Size checks done in implementation.
*/ break; case DEVICE_PROP_UINT32: if (sz % 4 != 0) return (-1); break; case DEVICE_PROP_UINT64: if (sz % 8 != 0) return (-1); break; default: return (-1); } return (BUS_GET_PROPERTY(bus, dev, prop, val, sz, type)); } bool device_has_property(device_t dev, const char *prop) { return (device_get_property(dev, prop, NULL, 0, DEVICE_PROP_ANY) >= 0); } /** * @brief Return non-zero if the DF_QUIET_CHILDREN flag is set on the device */ int device_has_quiet_children(device_t dev) { return ((dev->flags & DF_QUIET_CHILDREN) != 0); } /** * @brief Return non-zero if the DF_QUIET flag is set on the device */ int device_is_quiet(device_t dev) { return ((dev->flags & DF_QUIET) != 0); } /** * @brief Return non-zero if the DF_ENABLED flag is set on the device */ int device_is_enabled(device_t dev) { return ((dev->flags & DF_ENABLED) != 0); } /** * @brief Return non-zero if the device was successfully probed */ int device_is_alive(device_t dev) { return (dev->state >= DS_ALIVE); } /** * @brief Return non-zero if the device currently has a driver * attached to it */ int device_is_attached(device_t dev) { return (dev->state >= DS_ATTACHED); } /** * @brief Return non-zero if the device is currently suspended. */ int device_is_suspended(device_t dev) { return ((dev->flags & DF_SUSPENDED) != 0); } /** * @brief Set the devclass of a device * @see devclass_add_device(). */ int device_set_devclass(device_t dev, const char *classname) { devclass_t dc; int error; if (!classname) { if (dev->devclass) devclass_delete_device(dev->devclass, dev); return (0); } if (dev->devclass) { printf("device_set_devclass: device class already set\n"); return (EINVAL); } dc = devclass_find_internal(classname, NULL, TRUE); if (!dc) return (ENOMEM); error = devclass_add_device(dc, dev); bus_data_generation_update(); return (error); } /** * @brief Set the devclass of a device and mark the devclass fixed. * @see device_set_devclass() */ int device_set_devclass_fixed(device_t dev, const char *classname) { int error; if (classname == NULL) return (EINVAL); error = device_set_devclass(dev, classname); if (error) return (error); dev->flags |= DF_FIXEDCLASS; return (0); } /** * @brief Query the device to determine if it's of a fixed devclass * @see device_set_devclass_fixed() */ bool device_is_devclass_fixed(device_t dev) { return ((dev->flags & DF_FIXEDCLASS) != 0); } /** * @brief Set the driver of a device * * @retval 0 success * @retval EBUSY the device already has a driver attached * @retval ENOMEM a memory allocation failure occurred */ int device_set_driver(device_t dev, driver_t *driver) { int domain; struct domainset *policy; if (dev->state >= DS_ATTACHED) return (EBUSY); if (dev->driver == driver) return (0); if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) { free(dev->softc, M_BUS_SC); dev->softc = NULL; } device_set_desc(dev, NULL); kobj_delete((kobj_t) dev, NULL); dev->driver = driver; if (driver) { kobj_init((kobj_t) dev, (kobj_class_t) driver); if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) { if (bus_get_domain(dev, &domain) == 0) policy = DOMAINSET_PREF(domain); else policy = DOMAINSET_RR(); dev->softc = malloc_domainset(driver->size, M_BUS_SC, policy, M_NOWAIT | M_ZERO); if (!dev->softc) { kobj_delete((kobj_t) dev, NULL); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; return (ENOMEM); } } } else { kobj_init((kobj_t) dev, &null_class); } bus_data_generation_update(); return (0); } /** * @brief Probe a device, and return its status. * * This function is the core of the device autoconfiguration * system.
Its purpose is to select a suitable driver for a device and * then call that driver to initialise the hardware appropriately. The * driver is selected by calling the DEVICE_PROBE() method of a set of * candidate drivers and then choosing the driver which returned the * best value. This driver is then attached to the device using * device_attach(). * * The set of suitable drivers is taken from the list of drivers in * the parent device's devclass. If the device was originally created * with a specific class name (see device_add_child()), only drivers * with that name are probed, otherwise all drivers in the devclass * are probed. If no drivers return successful probe values in the * parent devclass, the search continues in the parent of that * devclass (see devclass_get_parent()) if any. * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code * @retval -1 Device already attached */ int device_probe(device_t dev) { int error; bus_topo_assert(); if (dev->state >= DS_ALIVE) return (-1); if (!(dev->flags & DF_ENABLED)) { if (bootverbose && device_get_name(dev) != NULL) { device_print_prettyname(dev); printf("not probed (disabled)\n"); } return (-1); } if ((error = device_probe_child(dev->parent, dev)) != 0) { if (bus_current_pass == BUS_PASS_DEFAULT && !(dev->flags & DF_DONENOMATCH)) { device_handle_nomatch(dev); } return (error); } return (0); } /** * @brief Probe a device and attach a driver if possible * * calls device_probe() and attaches if that was successful. */ int device_probe_and_attach(device_t dev) { int error; bus_topo_assert(); error = device_probe(dev); if (error == -1) return (0); else if (error != 0) return (error); CURVNET_SET_QUIET(vnet0); error = device_attach(dev); CURVNET_RESTORE(); return error; } /** * @brief Attach a device driver to a device * * This function is a wrapper around the DEVICE_ATTACH() driver * method. In addition to calling DEVICE_ATTACH(), it initialises the * device's sysctl tree, optionally prints a description of the device * and queues a notification event for user-based device management * services. * * Normally this function is only called internally from * device_probe_and_attach(). * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_attach(device_t dev) { uint64_t attachtime; uint16_t attachentropy; int error; if (resource_disabled(dev->driver->name, dev->unit)) { device_disable(dev); if (bootverbose) device_printf(dev, "disabled via hints entry\n"); return (ENXIO); } device_sysctl_init(dev); if (!device_is_quiet(dev)) device_print_child(dev->parent, dev); attachtime = get_cyclecount(); dev->state = DS_ATTACHING; if ((error = DEVICE_ATTACH(dev)) != 0) { printf("device_attach: %s%d attach returned %d\n", dev->driver->name, dev->unit, error); if (disable_failed_devs) { /* * When the user has asked to disable failed devices, we * directly disable the device, but leave it in the * attaching state. It will not try to probe/attach the * device further. This leaves the device numbering * intact for other similar devices in the system. It * can be removed from this state with devctl. */ device_disable(dev); } else { /* * Otherwise, when attach fails, tear down the state * around that so we can retry when, for example, new * drivers are loaded. 
*/ if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); KASSERT(dev->busy == 0, ("attach failed but busy")); dev->state = DS_NOTPRESENT; } return (error); } dev->flags |= DF_ATTACHED_ONCE; /* * We only need the low bits of this time, but ranges from tens to thousands * have been seen, so keep 2 bytes' worth. */ attachentropy = (uint16_t)(get_cyclecount() - attachtime); random_harvest_direct(&attachentropy, sizeof(attachentropy), RANDOM_ATTACH); device_sysctl_update(dev); dev->state = DS_ATTACHED; dev->flags &= ~DF_DONENOMATCH; EVENTHANDLER_DIRECT_INVOKE(device_attach, dev); return (0); } /** * @brief Detach a driver from a device * * This function is a wrapper around the DEVICE_DETACH() driver * method. If the call to DEVICE_DETACH() succeeds, it calls * BUS_CHILD_DETACHED() for the parent of @p dev, queues a * notification event for user-based device management services and * cleans up the device's sysctl tree. * * @param dev the device to un-initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_detach(device_t dev) { int error; bus_topo_assert(); PDEBUG(("%s", DEVICENAME(dev))); if (dev->busy > 0) return (EBUSY); if (dev->state == DS_ATTACHING) { device_printf(dev, "device in attaching state! Deferring detach.\n"); return (EBUSY); } if (dev->state != DS_ATTACHED) return (0); EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_BEGIN); if ((error = DEVICE_DETACH(dev)) != 0) { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_FAILED); return (error); } else { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_COMPLETE); } if (!device_is_quiet(dev)) device_printf(dev, "detached\n"); if (dev->parent) BUS_CHILD_DETACHED(dev->parent, dev); if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); device_verbose(dev); dev->state = DS_NOTPRESENT; (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); return (0); } /** * @brief Tells a driver to quiesce itself. * * This function is a wrapper around the DEVICE_QUIESCE() driver * method. It returns EBUSY if the device is busy, succeeds trivially * if the device is not attached, and otherwise returns the result of * DEVICE_QUIESCE(). * * @param dev the device to quiesce * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_quiesce(device_t dev) { PDEBUG(("%s", DEVICENAME(dev))); if (dev->busy > 0) return (EBUSY); if (dev->state != DS_ATTACHED) return (0); return (DEVICE_QUIESCE(dev)); } /** * @brief Notify a device of system shutdown * * This function calls the DEVICE_SHUTDOWN() driver method if the * device currently has an attached driver. * * @returns the value returned by DEVICE_SHUTDOWN() */ int device_shutdown(device_t dev) { if (dev->state < DS_ATTACHED) return (0); return (DEVICE_SHUTDOWN(dev)); } /** * @brief Set the unit number of a device * * This function can be used to override the unit number used for a * device (e.g. to wire a device to a pre-configured unit number).
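 *
 * For example, wiring a child to unit 2 (a sketch; handling of the
 * failure path is up to the caller):
 *
 * @code
 *	error = device_set_unit(child, 2);
 *	if (error != 0)
 *		device_printf(child, "cannot wire to unit 2: %d\n", error);
 * @endcode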
*/ int device_set_unit(device_t dev, int unit) { devclass_t dc; int err; if (unit == dev->unit) return (0); dc = device_get_devclass(dev); if (unit < dc->maxunit && dc->devices[unit]) return (EBUSY); err = devclass_delete_device(dc, dev); if (err) return (err); dev->unit = unit; err = devclass_add_device(dc, dev); if (err) return (err); bus_data_generation_update(); return (0); } /*======================================*/ /* * Some useful method implementations to make life easier for bus drivers. */ /** * @brief Initialize a resource mapping request * * This is the internal implementation of the public API * resource_init_map_request. Callers may be using a different layout * of struct resource_map_request than the kernel, so callers pass in * the size of the structure they are using to identify the structure * layout. */ void resource_init_map_request_impl(struct resource_map_request *args, size_t sz) { bzero(args, sz); args->size = sz; args->memattr = VM_MEMATTR_DEVICE; } /** * @brief Validate a resource mapping request * * Translate a device driver's mapping request (@p in) to a struct * resource_map_request using the current structure layout (@p out). * In addition, validate the offset and length from the mapping * request against the bounds of the resource @p r. If the offset or * length are invalid, fail with EINVAL. If the offset and length are * valid, the absolute starting address of the requested mapping is * returned in @p startp and the length of the requested mapping is * returned in @p lengthp. */ int resource_validate_map_request(struct resource *r, struct resource_map_request *in, struct resource_map_request *out, rman_res_t *startp, rman_res_t *lengthp) { rman_res_t end, length, start; /* * This assumes that any callers of this function are compiled * into the kernel and use the same version of the structure * as this file. */ MPASS(out->size == sizeof(struct resource_map_request)); if (in != NULL) bcopy(in, out, imin(in->size, out->size)); start = rman_get_start(r) + out->offset; if (out->length == 0) length = rman_get_size(r); else length = out->length; end = start + length - 1; if (start > rman_get_end(r) || start < rman_get_start(r)) return (EINVAL); if (end > rman_get_end(r) || end < start) return (EINVAL); *lengthp = length; *startp = start; return (0); } /** * @brief Initialise a resource list. * * @param rl the resource list to initialise */ void resource_list_init(struct resource_list *rl) { STAILQ_INIT(rl); } /** * @brief Reclaim memory used by a resource list. * * This function frees the memory for all resource entries on the list * (if any). * * @param rl the resource list to free */ void resource_list_free(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) panic("resource_list_free: resource entry is busy"); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } /** * @brief Add a resource entry. * * This function adds a resource entry using the given @p type, @p * start, @p end and @p count values. A rid value is chosen by * searching sequentially for the first unused rid starting at zero. * * @param rl the resource list to edit * @param type the resource entry type (e.g. 
SYS_RES_MEMORY) * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count) { int rid; rid = 0; while (resource_list_find(rl, type, rid) != NULL) rid++; resource_list_add(rl, type, rid, start, end, count); return (rid); } /** * @brief Add or modify a resource entry. * * If an existing entry exists with the same type and rid, it will be * modified using the given values of @p start, @p end and @p * count. If no entry exists, a new one will be created using the * given values. The resource list entry that matches is then returned. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) { rle = malloc(sizeof(struct resource_list_entry), M_BUS, M_NOWAIT); if (!rle) panic("resource_list_add: can't record entry"); STAILQ_INSERT_TAIL(rl, rle, link); rle->type = type; rle->rid = rid; rle->res = NULL; rle->flags = 0; } if (rle->res) panic("resource_list_add: resource entry is busy"); rle->start = start; rle->end = end; rle->count = count; return (rle); } /** * @brief Determine if a resource entry is busy. * * Returns true if a resource entry is busy meaning that it has an * associated resource that is not an unallocated "reserved" resource. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is busy, zero otherwise. */ int resource_list_busy(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL || rle->res == NULL) return (0); if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) { KASSERT(!(rman_get_flags(rle->res) & RF_ACTIVE), ("reserved resource is active")); return (0); } return (1); } /** * @brief Determine if a resource entry is reserved. * * Returns true if a resource entry is reserved meaning that it has an * associated "reserved" resource. The resource can either be * allocated or unallocated. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is reserved, zero otherwise. */ int resource_list_reserved(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle != NULL && rle->flags & RLE_RESERVED) return (1); return (0); } /** * @brief Find a resource entry by type and rid. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns the resource entry pointer or NULL if there is no such * entry. */ struct resource_list_entry * resource_list_find(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type && rle->rid == rid) return (rle); } return (NULL); } /** * @brief Delete a resource entry. 
* * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier */ void resource_list_delete(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle = resource_list_find(rl, type, rid); if (rle) { if (rle->res != NULL) panic("resource_list_delete: resource has not been released"); STAILQ_REMOVE(rl, rle, resource_list_entry, link); free(rle, M_BUS); } } /** * @brief Allocate a reserved resource * * This can be used by buses to force the allocation of resources * that are always active in the system even if they are not allocated * by a driver (e.g. PCI BARs). This function is usually called when * adding a new child to the bus. The resource is allocated from the * parent bus when it is reserved. The resource list entry is marked * with RLE_RESERVED to note that it is a reserved resource. * * Subsequent attempts to allocate the resource with * resource_list_alloc() will succeed the first time and will set * RLE_ALLOCATED to note that it has been allocated. When a reserved * resource that has been allocated is released with * resource_list_release() the resource RLE_ALLOCATED is cleared, but * the actual resource remains allocated. The resource can be released to * the parent bus by calling resource_list_unreserve(). * * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device for which the resource is being reserved * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); struct resource *r; if (passthrough) panic( "resource_list_reserve() should only be called for direct children"); if (flags & RF_ACTIVE) panic( "resource_list_reserve() should only reserve inactive resources"); r = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); if (r != NULL) { rle = resource_list_find(rl, type, *rid); rle->flags |= RLE_RESERVED; } return (r); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE() * * Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list * and passing the allocation up to the parent of @p bus. This assumes * that the first entry of @c device_get_ivars(child) is a struct * resource_list. This also handles 'passthrough' allocations where a * child is a remote descendant of bus by passing the allocation up to * the parent of bus. * * Typically, a bus driver would store a list of child resources * somewhere in the child device's ivars (see device_get_ivars()) and * its implementation of BUS_ALLOC_RESOURCE() would find that list and * then call resource_list_alloc() to perform the allocation. 
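 *
 * A minimal sketch of such a method (struct mybus_devinfo and its
 * embedded resource list are hypothetical):
 *
 * @code
 *	static struct resource *
 *	mybus_alloc_resource(device_t bus, device_t child, int type,
 *	    int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
 *	    u_int flags)
 *	{
 *		struct mybus_devinfo *dinfo = device_get_ivars(child);
 *
 *		return (resource_list_alloc(&dinfo->mbd_resources, bus,
 *		    child, type, rid, start, end, count, flags));
 *	}
 * @endcode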
* * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device which is requesting an allocation * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags)); } rle = resource_list_find(rl, type, *rid); if (!rle) return (NULL); /* no resource of that type/rid */ if (rle->res) { if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) return (NULL); if ((flags & RF_ACTIVE) && bus_activate_resource(child, type, *rid, rle->res) != 0) return (NULL); rle->flags |= RLE_ALLOCATED; return (rle->res); } device_printf(bus, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (isdefault) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); /* * Record the new range. */ if (rle->res) { rle->start = rman_get_start(rle->res); rle->end = rman_get_end(rle->res); rle->count = count; } return (rle->res); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE() * * Implement BUS_RELEASE_RESOURCE() using a resource list. Normally * used with resource_list_alloc(). 
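 *
 * The matching release method is usually symmetric (same hypothetical
 * mybus_devinfo layout as in the allocation sketch above):
 *
 * @code
 *	static int
 *	mybus_release_resource(device_t bus, device_t child, int type,
 *	    int rid, struct resource *r)
 *	{
 *		struct mybus_devinfo *dinfo = device_get_ivars(child);
 *
 *		return (resource_list_release(&dinfo->mbd_resources, bus,
 *		    child, type, rid, r));
 *	}
 * @endcode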
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device which is requesting a release * @param type the type of resource to release * @param rid the resource identifier * @param res the resource to release * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_release(struct resource_list *rl, device_t bus, device_t child, int type, int rid, struct resource *res) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int error; if (passthrough) { return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res)); } rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_release: can't find resource"); if (!rle->res) panic("resource_list_release: resource entry is not busy"); if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) { if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error) return (error); } rle->flags &= ~RLE_ALLOCATED; return (0); } return (EINVAL); } error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res); if (error) return (error); rle->res = NULL; return (0); } /** * @brief Release all active resources of a given type * * Release all active resources of a specified type. This is intended * to be used to cleanup resources leaked by a driver after detach or * a failed attach. * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose active resources are being released * @param type the type of resources to release * * @retval 0 success * @retval EBUSY at least one resource was active */ int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type) { struct resource_list_entry *rle; int error, retval; retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) continue; retval = EBUSY; error = resource_list_release(rl, bus, child, type, rman_get_rid(rle->res), rle->res); if (error != 0) device_printf(bus, "Failed to release active resource: %d\n", error); } return (retval); } /** * @brief Fully release a reserved resource * * Fully releases a resource reserved via resource_list_reserve(). 
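 *
 * For example, a PCI-like bus that must drop a BAR reservation before
 * reprogramming the BAR might do (a sketch):
 *
 * @code
 *	error = resource_list_unreserve(rl, bus, child, SYS_RES_MEMORY,
 *	    rid);
 *	if (error == EBUSY)
 *		device_printf(bus, "BAR is in use; cannot unreserve\n");
 * @endcode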
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose reserved resource is being released * @param type the type of resource to release * @param rid the resource identifier * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); if (passthrough) panic( "resource_list_unreserve() should only be called for direct children"); rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_unreserve: can't find resource"); if (!(rle->flags & RLE_RESERVED)) return (EINVAL); if (rle->flags & RLE_ALLOCATED) return (EBUSY); rle->flags &= ~RLE_RESERVED; return (resource_list_release(rl, bus, child, type, rid, rle->res)); } /** * @brief Print a description of resources in a resource list * * Print all resources of a specified type, for use in BUS_PRINT_CHILD(). * The name is printed if at least one resource of the given type is available. * The format is used to print resource start and end. * * @param rl the resource list to print * @param name the name of @p type, e.g. @c "memory" * @param type the type of resource entry to print * @param format printf(9) format string to print resource * start and end values * * @returns the number of characters printed */ int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format) { struct resource_list_entry *rle; int printed, retval; printed = 0; retval = 0; /* Yes, this is kinda cheating */ STAILQ_FOREACH(rle, rl, link) { if (rle->type == type) { if (printed == 0) retval += printf(" %s ", name); else retval += printf(","); printed++; retval += printf(format, rle->start); if (rle->count > 1) { retval += printf("-"); retval += printf(format, rle->start + rle->count - 1); } } } return (retval); } /** * @brief Releases all the resources in a list. * * @param rl The resource list to purge. * * @returns nothing */ void resource_list_purge(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) bus_release_resource(rman_get_device(rle->res), rle->type, rle->rid, rle->res); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit) { return (device_add_child_ordered(dev, order, name, unit)); } /** * @brief Helper function for implementing DEVICE_PROBE() * * This function can be used to help implement the DEVICE_PROBE() for * a bus (i.e. a device which has other devices attached to it). It * calls the DEVICE_IDENTIFY() method of each driver in the device's * devclass. */ int bus_generic_probe(device_t dev) { devclass_t dc = dev->devclass; driverlink_t dl; TAILQ_FOREACH(dl, &dc->drivers, link) { /* * If this driver's pass is too high, then ignore it. * For most drivers in the default pass, this will * never be true. For early-pass drivers they will * only call the identify routines of eligible drivers * when this routine is called. Drivers for later * passes should have their identify routines called * on early-pass buses during BUS_NEW_PASS().
*/ if (dl->pass > bus_current_pass) continue; DEVICE_IDENTIFY(dl->driver, dev); } return (0); } /** * @brief Helper function for implementing DEVICE_ATTACH() * * This function can be used to help implement the DEVICE_ATTACH() for * a bus. It calls device_probe_and_attach() for each of the device's * children. */ int bus_generic_attach(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { device_probe_and_attach(child); } return (0); } /** * @brief Helper function for delaying attaching children * * Many buses can't run transactions on the bus which children need to probe and * attach until after interrupts and/or timers are running. This function * delays their attach until interrupts and timers are enabled. */ int bus_delayed_attach_children(device_t dev) { /* Probe and attach the bus children when interrupts are available */ config_intrhook_oneshot((ich_func_t)bus_generic_attach, dev); return (0); } /** * @brief Helper function for implementing DEVICE_DETACH() * * This function can be used to help implement the DEVICE_DETACH() for * a bus. It calls device_detach() for each of the device's * children. */ int bus_generic_detach(device_t dev) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); /* * Detach children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((error = device_detach(child)) != 0) return (error); } return (0); } /** * @brief Helper function for implementing DEVICE_SHUTDOWN() * * This function can be used to help implement the DEVICE_SHUTDOWN() * for a bus. It calls device_shutdown() for each of the device's * children. */ int bus_generic_shutdown(device_t dev) { device_t child; /* * Shut down children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { device_shutdown(child); } return (0); } /** * @brief Default function for suspending a child device. * * This function is to be used by a bus's DEVICE_SUSPEND_CHILD(). */ int bus_generic_suspend_child(device_t dev, device_t child) { int error; error = DEVICE_SUSPEND(child); if (error == 0) { child->flags |= DF_SUSPENDED; } else { printf("DEVICE_SUSPEND(%s) failed: %d\n", device_get_nameunit(child), error); } return (error); } /** * @brief Default function for resuming a child device. * * This function is to be used by a bus's DEVICE_RESUME_CHILD(). */ int bus_generic_resume_child(device_t dev, device_t child) { DEVICE_RESUME(child); child->flags &= ~DF_SUSPENDED; return (0); } /** * @brief Helper function for implementing DEVICE_SUSPEND() * * This function can be used to help implement the DEVICE_SUSPEND() * for a bus. It calls DEVICE_SUSPEND() for each of the device's * children. If any call to DEVICE_SUSPEND() fails, the suspend * operation is aborted and any devices which were suspended are * resumed immediately by calling their DEVICE_RESUME() methods. */ int bus_generic_suspend(device_t dev) { int error; device_t child; /* * Suspend children in the reverse order. * For most buses all children are equal, so the order does not matter. * Other buses, such as acpi, carefully order their child devices to * express implicit dependencies between them. For such buses it is * safer to bring down devices in the reverse order. 
*/ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { error = BUS_SUSPEND_CHILD(dev, child); if (error != 0) { child = TAILQ_NEXT(child, link); if (child != NULL) { TAILQ_FOREACH_FROM(child, &dev->children, link) BUS_RESUME_CHILD(dev, child); } return (error); } } return (0); } /** * @brief Helper function for implementing DEVICE_RESUME() * * This function can be used to help implement the DEVICE_RESUME() for * a bus. It calls DEVICE_RESUME() on each of the device's children. */ int bus_generic_resume(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESUME_CHILD(dev, child); /* if resume fails, there's nothing we can usefully do... */ } return (0); } /** * @brief Helper function for implementing BUS_RESET_POST * * A bus can use this function to implement common operations of * re-attaching or resuming the children after the bus itself was * reset, and after restoring bus-unique state of children. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_post(device_t dev, int flags) { device_t child; int error, error1; error = 0; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESET_POST(dev, child); error1 = (flags & DEVF_RESET_DETACH) != 0 ? device_probe_and_attach(child) : BUS_RESUME_CHILD(dev, child); if (error == 0 && error1 != 0) error = error1; } return (error); } static void bus_helper_reset_prepare_rollback(device_t dev, device_t child, int flags) { child = TAILQ_NEXT(child, link); if (child == NULL) return; TAILQ_FOREACH_FROM(child, &dev->children, link) { BUS_RESET_POST(dev, child); if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } /** * @brief Helper function for implementing BUS_RESET_PREPARE * * A bus can use this function to implement common operations of * detaching or suspending the children before the bus itself is * reset, and then save bus-unique state of children that must * persist across reset. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_prepare(device_t dev, int flags) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ? device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { error = BUS_RESET_PREPARE(dev, child); if (error != 0) { if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } if (error != 0) { bus_helper_reset_prepare_rollback(dev, child, flags); return (error); } } return (0); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the first part of the ascii representation of * @p child, including its name, unit and description (if any - see * device_set_desc()). * * @returns the number of characters printed */ int bus_print_child_header(device_t dev, device_t child) { int retval = 0; if (device_get_desc(child)) { retval += device_printf(child, "<%s>", device_get_desc(child)); } else { retval += printf("%s", device_get_nameunit(child)); } return (retval); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the last part of the ascii representation of * @p child, which consists of the string @c " on " followed by the * name and unit of the @p dev.
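 *
 * A custom BUS_PRINT_CHILD() implementation typically brackets its own
 * annotations with these helpers (a sketch; the slot accessor is
 * hypothetical):
 *
 * @code
 *	static int
 *	mybus_print_child(device_t dev, device_t child)
 *	{
 *		int retval = 0;
 *
 *		retval += bus_print_child_header(dev, child);
 *		retval += printf(" at slot %d", mybus_get_slot(child));
 *		retval += bus_print_child_footer(dev, child);
 *		return (retval);
 *	}
 * @endcode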
* * @returns the number of characters printed */ int bus_print_child_footer(device_t dev, device_t child) { return (printf(" on %s\n", device_get_nameunit(dev))); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints out the VM domain for the given device. * * @returns the number of characters printed */ int bus_print_child_domain(device_t dev, device_t child) { int domain; /* No domain? Don't print anything */ if (BUS_GET_DOMAIN(dev, child, &domain) != 0) return (0); return (printf(" numa-domain %d", domain)); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function simply calls bus_print_child_header() followed by * bus_print_child_footer(). * * @returns the number of characters printed */ int bus_generic_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } /** * @brief Stub function for implementing BUS_READ_IVAR(). * * @returns ENOENT */ int bus_generic_read_ivar(device_t dev, device_t child, int index, uintptr_t * result) { return (ENOENT); } /** * @brief Stub function for implementing BUS_WRITE_IVAR(). * * @returns ENOENT */ int bus_generic_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * @brief Helper function for implementing BUS_GET_PROPERTY(). * * This simply calls the BUS_GET_PROPERTY() method of the parent of * @p dev, until a non-default implementation is found. */ ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { if (device_get_parent(dev) != NULL) return (BUS_GET_PROPERTY(device_get_parent(dev), child, propname, propvalue, size, type)); return (-1); } /** * @brief Stub function for implementing BUS_GET_RESOURCE_LIST(). * * @returns NULL */ struct resource_list * bus_generic_get_resource_list(device_t dev, device_t child) { return (NULL); } /** * @brief Helper function for implementing BUS_DRIVER_ADDED(). * * This implementation of BUS_DRIVER_ADDED() simply calls the driver's * DEVICE_IDENTIFY() method to allow it to add new children to the bus * and then calls device_probe_and_attach() for each unattached child. */ void bus_generic_driver_added(device_t dev, driver_t *driver) { device_t child; DEVICE_IDENTIFY(driver, dev); TAILQ_FOREACH(child, &dev->children, link) { if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } } /** * @brief Helper function for implementing BUS_NEW_PASS(). * * This implementation of BUS_NEW_PASS() first calls the identify * routines for any drivers that probe at the current pass. Then it * walks the list of devices for this bus. If a device is already * attached, then it calls BUS_NEW_PASS() on that device. If the * device is not already attached, it attempts to attach a driver to * it. */ void bus_generic_new_pass(device_t dev) { driverlink_t dl; devclass_t dc; device_t child; dc = dev->devclass; TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->pass == bus_current_pass) DEVICE_IDENTIFY(dl->driver, dev); } TAILQ_FOREACH(child, &dev->children, link) { if (child->state >= DS_ATTACHED) BUS_NEW_PASS(child); else if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } } /** * @brief Helper function for implementing BUS_SETUP_INTR(). * * This simple implementation of BUS_SETUP_INTR() simply calls the * BUS_SETUP_INTR() method of the parent of @p dev.
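 *
 * Buses that merely forward interrupt and resource operations can list
 * the generic implementations directly in their method table (a
 * sketch; the mybus_methods table is illustrative):
 *
 * @code
 *	static device_method_t mybus_methods[] = {
 *		DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
 *		DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
 *		DEVMETHOD(bus_alloc_resource,	bus_generic_alloc_resource),
 *		DEVMETHOD_END
 *	};
 * @endcode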
*/ int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SETUP_INTR(dev->parent, child, irq, flags, filter, intr, arg, cookiep)); return (EINVAL); } /** * @brief Helper function for implementing BUS_TEARDOWN_INTR(). * * This simple implementation of BUS_TEARDOWN_INTR() simply calls the * BUS_TEARDOWN_INTR() method of the parent of @p dev. */ int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie)); return (EINVAL); } /** * @brief Helper function for implementing BUS_SUSPEND_INTR(). * * This simple implementation of BUS_SUSPEND_INTR() simply calls the * BUS_SUSPEND_INTR() method of the parent of @p dev. */ int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SUSPEND_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_RESUME_INTR(). * * This simple implementation of BUS_RESUME_INTR() simply calls the * BUS_RESUME_INTR() method of the parent of @p dev. */ int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RESUME_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This simple implementation of BUS_ADJUST_RESOURCE() simply calls the * BUS_ADJUST_RESOURCE() method of the parent of @p dev. */ int -bus_generic_adjust_resource(device_t dev, device_t child, int type, - struct resource *r, rman_res_t start, rman_res_t end) +bus_generic_adjust_resource(device_t dev, device_t child, struct resource *r, + rman_res_t start, rman_res_t end) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) - return (BUS_ADJUST_RESOURCE(dev->parent, child, type, r, start, - end)); + return (BUS_ADJUST_RESOURCE(dev->parent, child, r, start, end)); return (EINVAL); } /* * @brief Helper function for implementing BUS_TRANSLATE_RESOURCE(). * * This simple implementation of BUS_TRANSLATE_RESOURCE() simply calls the * BUS_TRANSLATE_RESOURCE() method of the parent of @p dev. If there is no * parent, no translation happens. */ int bus_generic_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart) { if (dev->parent) return (BUS_TRANSLATE_RESOURCE(dev->parent, type, start, newstart)); *newstart = start; return (0); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This simple implementation of BUS_ALLOC_RESOURCE() simply calls the * BUS_ALLOC_RESOURCE() method of the parent of @p dev. */ struct resource * bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid, start, end, count, flags)); return (NULL); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). 
* * This simple implementation of BUS_RELEASE_RESOURCE() simply calls the * BUS_RELEASE_RESOURCE() method of the parent of @p dev. */ int bus_generic_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RELEASE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the * BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ACTIVATE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the * BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_MAP_RESOURCE(). * * This simple implementation of BUS_MAP_RESOURCE() simply calls the * BUS_MAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_MAP_RESOURCE(dev->parent, child, type, r, args, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_UNMAP_RESOURCE(). * * This simple implementation of BUS_UNMAP_RESOURCE() simply calls the * BUS_UNMAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_UNMAP_RESOURCE(dev->parent, child, type, r, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_BIND_INTR(). * * This simple implementation of BUS_BIND_INTR() simply calls the * BUS_BIND_INTR() method of the parent of @p dev. */ int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_BIND_INTR(dev->parent, child, irq, cpu)); return (EINVAL); } /** * @brief Helper function for implementing BUS_CONFIG_INTR(). * * This simple implementation of BUS_CONFIG_INTR() simply calls the * BUS_CONFIG_INTR() method of the parent of @p dev. */ int bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DESCRIBE_INTR(). * * This simple implementation of BUS_DESCRIBE_INTR() simply calls the * BUS_DESCRIBE_INTR() method of the parent of @p dev. 
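 *
 * Drivers normally reach this through bus_describe_intr(9) after a
 * successful bus_setup_intr() (a sketch; sc and mydrv_intr are
 * hypothetical driver names):
 *
 * @code
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, mydrv_intr, sc,
 *	    &sc->intr_cookie);
 *	if (error == 0)
 *		bus_describe_intr(dev, sc->irq_res, sc->intr_cookie,
 *		    "%s", "event");
 * @endcode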
*/ int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie, descr)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_CPUS(). * * This simple implementation of BUS_GET_CPUS() simply calls the * BUS_GET_CPUS() method of the parent of @p dev. */ int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_CPUS(dev->parent, child, op, setsize, cpuset)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_DMA_TAG(). * * This simple implementation of BUS_GET_DMA_TAG() simply calls the * BUS_GET_DMA_TAG() method of the parent of @p dev. */ bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_DMA_TAG(dev->parent, child)); return (NULL); } /** * @brief Helper function for implementing BUS_GET_BUS_TAG(). * * This simple implementation of BUS_GET_BUS_TAG() simply calls the * BUS_GET_BUS_TAG() method of the parent of @p dev. */ bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_BUS_TAG(dev->parent, child)); return ((bus_space_tag_t)0); } /** * @brief Helper function for implementing BUS_GET_RESOURCE(). * * This implementation of BUS_GET_RESOURCE() uses the * resource_list_find() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * search. */ int bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list * rl = NULL; struct resource_list_entry * rle = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); rle = resource_list_find(rl, type, rid); if (!rle) return (ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } /** * @brief Helper function for implementing BUS_SET_RESOURCE(). * * This implementation of BUS_SET_RESOURCE() uses the * resource_list_add() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ int bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); resource_list_add(rl, type, rid, start, (start + count - 1), count); return (0); } /** * @brief Helper function for implementing BUS_DELETE_RESOURCE(). * * This implementation of BUS_DELETE_RESOURCE() uses the * resource_list_delete() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ void bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return; resource_list_delete(rl, type, rid); return; } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() uses the * resource_list_release() function to do most of the work. 
It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ int bus_generic_rl_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); return (resource_list_release(rl, dev, child, type, rid, r)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() uses the * resource_list_alloc() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ struct resource * bus_generic_rl_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (NULL); return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() allocates a * resource from a resource manager. It uses BUS_GET_RMAN() * to obtain the resource manager. */ struct resource * bus_generic_rman_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *r; struct rman *rm; rm = BUS_GET_RMAN(dev, type, flags); if (rm == NULL) return (NULL); r = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (r == NULL) return (NULL); rman_set_rid(r, *rid); rman_set_type(r, type); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, r) != 0) { rman_release_resource(r); return (NULL); } } return (r); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This implementation of BUS_ADJUST_RESOURCE() adjusts resources only * if they were allocated from the resource manager returned by * BUS_GET_RMAN(). */ int -bus_generic_rman_adjust_resource(device_t dev, device_t child, int type, +bus_generic_rman_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; - rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); + rm = BUS_GET_RMAN(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (ENXIO); if (!rman_is_region_manager(r, rm)) return (EINVAL); return (rman_adjust_resource(r, start, end)); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() releases resources * allocated by bus_generic_rman_alloc_resource. */ int bus_generic_rman_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { #ifdef INVARIANTS struct rman *rm; #endif int error; #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error != 0) return (error); } return (rman_release_resource(r)); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This implementation of BUS_ACTIVATE_RESOURCE() activates resources * allocated by bus_generic_rman_alloc_resource. 
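 *
 * As a hedged sketch (not part of this change; foo_get_rman and struct
 * foo_softc are hypothetical), a bus driver adopts this helper and its
 * bus_generic_rman_* siblings by implementing BUS_GET_RMAN() and
 * pointing its resource methods at them:
 *
 *	static struct rman *
 *	foo_get_rman(device_t bus, int type, u_int flags)
 *	{
 *		struct foo_softc *sc = device_get_softc(bus);
 *
 *		return (type == SYS_RES_MEMORY ? &sc->mem_rman : NULL);
 *	}
 *
 *	DEVMETHOD(bus_get_rman,		foo_get_rman),
 *	DEVMETHOD(bus_alloc_resource,	bus_generic_rman_alloc_resource),
 *	DEVMETHOD(bus_adjust_resource,	bus_generic_rman_adjust_resource),
 *	DEVMETHOD(bus_activate_resource, bus_generic_rman_activate_resource),
 *
 * With the signature change in this revision, a child then adjusts an
 * allocated resource with bus_adjust_resource(dev, r, start, end); the
 * type is recovered internally via rman_get_type(r).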
*/ int bus_generic_rman_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_map map; #ifdef INVARIANTS struct rman *rm; #endif int error; #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif error = rman_activate_resource(r); if (error != 0) return (error); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { error = BUS_MAP_RESOURCE(dev, child, type, r, NULL, &map); if (error != 0) { rman_deactivate_resource(r); return (error); } rman_set_mapping(r, &map); } return (0); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This implementation of BUS_DEACTIVATE_RESOURCE() deactivates * resources allocated by bus_generic_rman_alloc_resource. */ int bus_generic_rman_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_map map; #ifdef INVARIANTS struct rman *rm; #endif int error; #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif error = rman_deactivate_resource(r); if (error != 0) return (error); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { rman_get_mapping(r, &map); BUS_UNMAP_RESOURCE(dev, child, type, r, &map); } return (0); } /** * @brief Helper function for implementing BUS_CHILD_PRESENT(). * * This simple implementation of BUS_CHILD_PRESENT() simply calls the * BUS_CHILD_PRESENT() method of the parent of @p dev. */ int bus_generic_child_present(device_t dev, device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(dev), dev)); } /** * @brief Helper function for implementing BUS_GET_DOMAIN(). * * This simple implementation of BUS_GET_DOMAIN() calls the * BUS_GET_DOMAIN() method of the parent of @p dev. If @p dev * does not have a parent, the function fails with ENOENT. */ int bus_generic_get_domain(device_t dev, device_t child, int *domain) { if (dev->parent) return (BUS_GET_DOMAIN(dev->parent, dev, domain)); return (ENOENT); } /** * @brief Helper function to implement normal BUS_GET_DEVICE_PATH() * * This function knows how to (a) pass the request up the tree if there's * a parent and (b) Knows how to supply a FreeBSD locator. * * @param bus bus in the walk up the tree * @param child leaf node to print information about * @param locator BUS_LOCATOR_xxx string for locator * @param sb Buffer to print information into */ int bus_generic_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { int rv = 0; device_t parent; /* * We don't recurse on ACPI since either we know the handle for the * device or we don't. And if we're in the generic routine, we don't * have a ACPI override. All other locators build up a path by having * their parents create a path and then adding the path element for this * node. That's why we recurse with parent, bus rather than the typical * parent, child: each spot in the tree is independent of what our child * will do with this path. */ parent = device_get_parent(bus); if (parent != NULL && strcmp(locator, BUS_LOCATOR_ACPI) != 0) { rv = BUS_GET_DEVICE_PATH(parent, bus, locator, sb); } if (strcmp(locator, BUS_LOCATOR_FREEBSD) == 0) { if (rv == 0) { sbuf_printf(sb, "/%s", device_get_nameunit(child)); } return (rv); } /* * Don't know what to do. 
So assume we do nothing. Not sure that's * the right thing, but keeps us from having a big list here. */ return (0); } /** * @brief Helper function for implementing BUS_RESCAN(). * * This null implementation of BUS_RESCAN() always fails to indicate * the bus does not support rescanning. */ int bus_null_rescan(device_t dev) { return (ENODEV); } /* * Some convenience functions to make it easier for drivers to use the * resource-management functions. All these really do is hide the * indirection through the parent's method table, making for slightly * less-wordy code. In the future, it might make sense for this code * to maintain some sort of a list of resources allocated by each device. */ int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) res[i] = NULL; for (i = 0; rs[i].type != -1; i++) { res[i] = bus_alloc_resource_any(dev, rs[i].type, &rs[i].rid, rs[i].flags); if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) { bus_release_resources(dev, rs, res); return (ENXIO); } } return (0); } void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) if (res[i] != NULL) { bus_release_resource( dev, rs[i].type, rs[i].rid, res[i]); res[i] = NULL; } } /** * @brief Wrapper function for BUS_ALLOC_RESOURCE(). * * This function simply calls the BUS_ALLOC_RESOURCE() method of the * parent of @p dev. */ struct resource * bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (dev->parent == NULL) return (NULL); res = BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end, count, flags); return (res); } /** * @brief Wrapper function for BUS_ADJUST_RESOURCE(). * * This function simply calls the BUS_ADJUST_RESOURCE() method of the * parent of @p dev. */ int -bus_adjust_resource(device_t dev, int type, struct resource *r, rman_res_t start, +bus_adjust_resource(device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { if (dev->parent == NULL) return (EINVAL); - return (BUS_ADJUST_RESOURCE(dev->parent, dev, type, r, start, end)); + return (BUS_ADJUST_RESOURCE(dev->parent, dev, r, start, end)); } int -bus_adjust_resource_new(device_t dev, struct resource *r, rman_res_t start, - rman_res_t end) +bus_adjust_resource_old(device_t dev, int type __unused, struct resource *r, + rman_res_t start, rman_res_t end) { - return (bus_adjust_resource(dev, rman_get_type(r), r, start, end)); + return (bus_adjust_resource(dev, r, start, end)); } /** * @brief Wrapper function for BUS_TRANSLATE_RESOURCE(). * * This function simply calls the BUS_TRANSLATE_RESOURCE() method of the * parent of @p dev. */ int bus_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart) { if (dev->parent == NULL) return (EINVAL); return (BUS_TRANSLATE_RESOURCE(dev->parent, type, start, newstart)); } /** * @brief Wrapper function for BUS_ACTIVATE_RESOURCE(). * * This function simply calls the BUS_ACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_activate_resource(device_t dev, int type, int rid, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); } int bus_activate_resource_new(device_t dev, struct resource *r) { return (bus_activate_resource(dev, rman_get_type(r), rman_get_rid(r), r)); } /** * @brief Wrapper function for BUS_DEACTIVATE_RESOURCE(). 
* * This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); } int bus_deactivate_resource_new(device_t dev, struct resource *r) { return (bus_deactivate_resource(dev, rman_get_type(r), rman_get_rid(r), r)); } /** * @brief Wrapper function for BUS_MAP_RESOURCE(). * * This function simply calls the BUS_MAP_RESOURCE() method of the * parent of @p dev. */ int bus_map_resource(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_MAP_RESOURCE(dev->parent, dev, type, r, args, map)); } int bus_map_resource_new(device_t dev, struct resource *r, struct resource_map_request *args, struct resource_map *map) { return (bus_map_resource(dev, rman_get_type(r), r, args, map)); } /** * @brief Wrapper function for BUS_UNMAP_RESOURCE(). * * This function simply calls the BUS_UNMAP_RESOURCE() method of the * parent of @p dev. */ int bus_unmap_resource(device_t dev, int type, struct resource *r, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_UNMAP_RESOURCE(dev->parent, dev, type, r, map)); } int bus_unmap_resource_new(device_t dev, struct resource *r, struct resource_map *map) { return (bus_unmap_resource(dev, rman_get_type(r), r, map)); } /** * @brief Wrapper function for BUS_RELEASE_RESOURCE(). * * This function simply calls the BUS_RELEASE_RESOURCE() method of the * parent of @p dev. */ int bus_release_resource(device_t dev, int type, int rid, struct resource *r) { int rv; if (dev->parent == NULL) return (EINVAL); rv = BUS_RELEASE_RESOURCE(dev->parent, dev, type, rid, r); return (rv); } int bus_release_resource_new(device_t dev, struct resource *r) { return (bus_release_resource(dev, rman_get_type(r), rman_get_rid(r), r)); } /** * @brief Wrapper function for BUS_SETUP_INTR(). * * This function simply calls the BUS_SETUP_INTR() method of the * parent of @p dev. */ int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { int error; if (dev->parent == NULL) return (EINVAL); error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler, arg, cookiep); if (error != 0) return (error); if (handler != NULL && !(flags & INTR_MPSAFE)) device_printf(dev, "[GIANT-LOCKED]\n"); return (0); } /** * @brief Wrapper function for BUS_TEARDOWN_INTR(). * * This function simply calls the BUS_TEARDOWN_INTR() method of the * parent of @p dev. */ int bus_teardown_intr(device_t dev, struct resource *r, void *cookie) { if (dev->parent == NULL) return (EINVAL); return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie)); } /** * @brief Wrapper function for BUS_SUSPEND_INTR(). * * This function simply calls the BUS_SUSPEND_INTR() method of the * parent of @p dev. */ int bus_suspend_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_SUSPEND_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_RESUME_INTR(). * * This function simply calls the BUS_RESUME_INTR() method of the * parent of @p dev. */ int bus_resume_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_RESUME_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_BIND_INTR(). 
* * This function simply calls the BUS_BIND_INTR() method of the * parent of @p dev. */ int bus_bind_intr(device_t dev, struct resource *r, int cpu) { if (dev->parent == NULL) return (EINVAL); return (BUS_BIND_INTR(dev->parent, dev, r, cpu)); } /** * @brief Wrapper function for BUS_DESCRIBE_INTR(). * * This function first formats the requested description into a * temporary buffer and then calls the BUS_DESCRIBE_INTR() method of * the parent of @p dev. */ int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) { va_list ap; char descr[MAXCOMLEN + 1]; if (dev->parent == NULL) return (EINVAL); va_start(ap, fmt); vsnprintf(descr, sizeof(descr), fmt, ap); va_end(ap); return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr)); } /** * @brief Wrapper function for BUS_SET_RESOURCE(). * * This function simply calls the BUS_SET_RESOURCE() method of the * parent of @p dev. */ int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count) { return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid, start, count)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev. */ int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp) { return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, startp, countp)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the start value. */ rman_res_t bus_get_resource_start(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (start); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the count value. */ rman_res_t bus_get_resource_count(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (count); } /** * @brief Wrapper function for BUS_DELETE_RESOURCE(). * * This function simply calls the BUS_DELETE_RESOURCE() method of the * parent of @p dev. */ void bus_delete_resource(device_t dev, int type, int rid) { BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid); } /** * @brief Wrapper function for BUS_CHILD_PRESENT(). * * This function simply calls the BUS_CHILD_PRESENT() method of the * parent of @p dev. */ int bus_child_present(device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(child), child)); } /** * @brief Wrapper function for BUS_CHILD_PNPINFO(). * * This function simply calls the BUS_CHILD_PNPINFO() method of the parent of @p * dev. */ int bus_child_pnpinfo(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (0); return (BUS_CHILD_PNPINFO(parent, child, sb)); } /** * @brief Generic implementation that does nothing for bus_child_pnpinfo * * This function has the right signature and returns 0 since the sbuf is passed * to us to append to. */ int bus_generic_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { return (0); } /** * @brief Wrapper function for BUS_CHILD_LOCATION(). * * This function simply calls the BUS_CHILD_LOCATION() method of the parent of * @p dev. 
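 *
 * For example (illustrative; each bus defines its own fields), the PCI
 * bus reports a child location along the lines of
 *
 *	slot=0 function=0 dbsf=pci0:2:0:0
 *
 * in @p sb.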
*/ int bus_child_location(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (0); return (BUS_CHILD_LOCATION(parent, child, sb)); } /** * @brief Generic implementation that does nothing for bus_child_location * * This function has the right signature and returns 0 since the sbuf is passed * to us to append to. */ int bus_generic_child_location(device_t dev, device_t child, struct sbuf *sb) { return (0); } /** * @brief Wrapper function for BUS_GET_CPUS(). * * This function simply calls the BUS_GET_CPUS() method of the * parent of @p dev. */ int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (EINVAL); return (BUS_GET_CPUS(parent, dev, op, setsize, cpuset)); } /** * @brief Wrapper function for BUS_GET_DMA_TAG(). * * This function simply calls the BUS_GET_DMA_TAG() method of the * parent of @p dev. */ bus_dma_tag_t bus_get_dma_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (NULL); return (BUS_GET_DMA_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_BUS_TAG(). * * This function simply calls the BUS_GET_BUS_TAG() method of the * parent of @p dev. */ bus_space_tag_t bus_get_bus_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return ((bus_space_tag_t)0); return (BUS_GET_BUS_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_DOMAIN(). * * This function simply calls the BUS_GET_DOMAIN() method of the * parent of @p dev. */ int bus_get_domain(device_t dev, int *domain) { return (BUS_GET_DOMAIN(device_get_parent(dev), dev, domain)); } /* Resume all devices and then notify userland that we're up again. */ static int root_resume(device_t dev) { int error; error = bus_generic_resume(dev); if (error == 0) { devctl_notify("kernel", "power", "resume", NULL); } return (error); } static int root_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += printf("\n"); return (retval); } static int root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* * If an interrupt mapping gets to here something bad has happened. */ panic("root_setup_intr"); } /* * If we get here, assume that the device is permanent and really is * present in the system. Removable bus drivers are expected to intercept * this call long before it gets here. We return -1 so that drivers that * really care can check vs -1 or some ERRNO returned higher in the food * chain. */ static int root_child_present(device_t dev, device_t child) { return (-1); } static int root_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { switch (op) { case INTR_CPUS: /* Default to returning the set of all CPUs. 
*/ if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = all_cpus; return (0); default: return (EINVAL); } } static kobj_method_t root_methods[] = { /* Device interface */ KOBJMETHOD(device_shutdown, bus_generic_shutdown), KOBJMETHOD(device_suspend, bus_generic_suspend), KOBJMETHOD(device_resume, root_resume), /* Bus interface */ KOBJMETHOD(bus_print_child, root_print_child), KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar), KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar), KOBJMETHOD(bus_setup_intr, root_setup_intr), KOBJMETHOD(bus_child_present, root_child_present), KOBJMETHOD(bus_get_cpus, root_get_cpus), KOBJMETHOD_END }; static driver_t root_driver = { "root", root_methods, 1, /* no softc */ }; device_t root_bus; devclass_t root_devclass; static int root_bus_module_handler(module_t mod, int what, void* arg) { switch (what) { case MOD_LOAD: TAILQ_INIT(&bus_data_devices); kobj_class_compile((kobj_class_t) &root_driver); root_bus = make_device(NULL, "root", 0); root_bus->desc = "System root bus"; kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver); root_bus->driver = &root_driver; root_bus->state = DS_ATTACHED; root_devclass = devclass_find_internal("root", NULL, FALSE); devctl2_init(); return (0); case MOD_SHUTDOWN: device_shutdown(root_bus); return (0); default: return (EOPNOTSUPP); } return (0); } static moduledata_t root_bus_mod = { "rootbus", root_bus_module_handler, NULL }; DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); /** * @brief Automatically configure devices * * This function begins the autoconfiguration process by calling * device_probe_and_attach() for each child of the @c root0 device. */ void root_bus_configure(void) { PDEBUG((".")); /* Eventually this will be split up, but this is sufficient for now. */ bus_set_pass(BUS_PASS_DEFAULT); } /** * @brief Module handler for registering device drivers * * This module handler is used to automatically register device * drivers when modules are loaded. If @p what is MOD_LOAD, it calls * devclass_add_driver() for the driver described by the * driver_module_data structure pointed to by @p arg */ int driver_module_handler(module_t mod, int what, void *arg) { struct driver_module_data *dmd; devclass_t bus_devclass; kobj_class_t driver; int error, pass; dmd = (struct driver_module_data *)arg; bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE); error = 0; switch (what) { case MOD_LOAD: if (dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); pass = dmd->dmd_pass; driver = dmd->dmd_driver; PDEBUG(("Loading module: driver %s on bus %s (pass %d)", DRIVERNAME(driver), dmd->dmd_busname, pass)); error = devclass_add_driver(bus_devclass, driver, pass, dmd->dmd_devclass); break; case MOD_UNLOAD: PDEBUG(("Unloading module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_delete_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; case MOD_QUIESCE: PDEBUG(("Quiesce module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_quiesce_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; default: error = EOPNOTSUPP; break; } return (error); } /** * @brief Enumerate all hinted devices for this bus. * * Walks through the hints for this bus and calls the bus_hinted_child * routine for each one it finds.
It searches first for the specific * bus that's being probed for hinted children (eg isa0), and then for * generic children (eg isa). * * @param dev bus device to enumerate */ void bus_enumerate_hinted_children(device_t bus) { int i; const char *dname, *busname; int dunit; /* * enumerate all devices on the specific bus */ busname = device_get_nameunit(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); /* * and all the generic ones. */ busname = device_get_name(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); } #ifdef BUS_DEBUG /* the _short versions avoid iteration by not calling anything that prints * more than oneliners. I love oneliners. */ static void print_device_short(device_t dev, int indent) { if (!dev) return; indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n", dev->unit, dev->desc, (dev->parent? "":"no "), (TAILQ_EMPTY(&dev->children)? "no ":""), (dev->flags&DF_ENABLED? "enabled,":"disabled,"), (dev->flags&DF_FIXEDCLASS? "fixed,":""), (dev->flags&DF_WILDCARD? "wildcard,":""), (dev->flags&DF_DESCMALLOCED? "descmalloced,":""), (dev->flags&DF_SUSPENDED? "suspended,":""), (dev->ivars? "":"no "), (dev->softc? "":"no "), dev->busy)); } static void print_device(device_t dev, int indent) { if (!dev) return; print_device_short(dev, indent); indentprintf(("Parent:\n")); print_device_short(dev->parent, indent+1); indentprintf(("Driver:\n")); print_driver_short(dev->driver, indent+1); indentprintf(("Devclass:\n")); print_devclass_short(dev->devclass, indent+1); } void print_device_tree_short(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device_short(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree_short(child, indent+1); } } void print_device_tree(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree(child, indent+1); } } static void print_driver_short(driver_t *driver, int indent) { if (!driver) return; indentprintf(("driver %s: softc size = %zd\n", driver->name, driver->size)); } static void print_driver(driver_t *driver, int indent) { if (!driver) return; print_driver_short(driver, indent); } static void print_driver_list(driver_list_t drivers, int indent) { driverlink_t driver; TAILQ_FOREACH(driver, &drivers, link) { print_driver(driver->driver, indent); } } static void print_devclass_short(devclass_t dc, int indent) { if ( !dc ) return; indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit)); } static void print_devclass(devclass_t dc, int indent) { int i; if ( !dc ) return; print_devclass_short(dc, indent); indentprintf(("Drivers:\n")); print_driver_list(dc->drivers, indent+1); indentprintf(("Devices:\n")); for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) print_device(dc->devices[i], indent+1); } void print_devclass_list_short(void) { devclass_t dc; printf("Short listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass_short(dc, 0); } } void print_devclass_list(void) { devclass_t dc; printf("Full listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass(dc, 0); } } #endif /* * User-space access to the device tree. 
* * We implement a small set of nodes: * * hw.bus Single integer read method to obtain the * current generation count. * hw.bus.devices Reads the entire device tree in flat space. * hw.bus.rman Resource manager interface * * We might like to add the ability to scan devclasses and/or drivers to * determine what else is currently loaded/available. */ static int sysctl_bus_info(SYSCTL_HANDLER_ARGS) { struct u_businfo ubus; ubus.ub_version = BUS_USER_VERSION; ubus.ub_generation = bus_data_generation; return (SYSCTL_OUT(req, &ubus, sizeof(ubus))); } SYSCTL_PROC(_hw_bus, OID_AUTO, info, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_bus_info, "S,u_businfo", "bus-related data"); static int sysctl_devices(SYSCTL_HANDLER_ARGS) { struct sbuf sb; int *name = (int *)arg1; u_int namelen = arg2; int index; device_t dev; struct u_device *udev; int error; if (namelen != 2) return (EINVAL); if (bus_data_generation_check(name[0])) return (EINVAL); index = name[1]; /* * Scan the list of devices, looking for the requested index. */ TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (index-- == 0) break; } if (dev == NULL) return (ENOENT); /* * Populate the return item, careful not to overflow the buffer. */ udev = malloc(sizeof(*udev), M_BUS, M_WAITOK | M_ZERO); if (udev == NULL) return (ENOMEM); udev->dv_handle = (uintptr_t)dev; udev->dv_parent = (uintptr_t)dev->parent; udev->dv_devflags = dev->devflags; udev->dv_flags = dev->flags; udev->dv_state = dev->state; sbuf_new(&sb, udev->dv_fields, sizeof(udev->dv_fields), SBUF_FIXEDLEN); if (dev->nameunit != NULL) sbuf_cat(&sb, dev->nameunit); sbuf_putc(&sb, '\0'); if (dev->desc != NULL) sbuf_cat(&sb, dev->desc); sbuf_putc(&sb, '\0'); if (dev->driver != NULL) sbuf_cat(&sb, dev->driver->name); sbuf_putc(&sb, '\0'); bus_child_pnpinfo(dev, &sb); sbuf_putc(&sb, '\0'); bus_child_location(dev, &sb); sbuf_putc(&sb, '\0'); error = sbuf_finish(&sb); if (error == 0) error = SYSCTL_OUT(req, udev, sizeof(*udev)); sbuf_delete(&sb); free(udev, M_BUS); return (error); } SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD | CTLFLAG_NEEDGIANT, sysctl_devices, "system device tree"); int bus_data_generation_check(int generation) { if (generation != bus_data_generation) return (1); /* XXX generate optimised lists here? */ return (0); } void bus_data_generation_update(void) { atomic_add_int(&bus_data_generation, 1); } int bus_free_resource(device_t dev, int type, struct resource *r) { if (r == NULL) return (0); return (bus_release_resource(dev, type, rman_get_rid(r), r)); } device_t device_lookup_by_name(const char *name) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (dev->nameunit != NULL && strcmp(dev->nameunit, name) == 0) return (dev); } return (NULL); } /* * /dev/devctl2 implementation. The existing /dev/devctl device has * implicit semantics on open, so it could not be reused for this. * Another option would be to call this /dev/bus? */ static int find_device(struct devreq *req, device_t *devp) { device_t dev; /* * First, ensure that the name is nul terminated. */ if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL) return (EINVAL); /* * Second, try to find an attached device whose name matches * 'name'. */ dev = device_lookup_by_name(req->dr_name); if (dev != NULL) { *devp = dev; return (0); } /* Finally, give device enumerators a chance. 
*/ dev = NULL; EVENTHANDLER_DIRECT_INVOKE(dev_lookup, req->dr_name, &dev); if (dev == NULL) return (ENOENT); *devp = dev; return (0); } static bool driver_exists(device_t bus, const char *driver) { devclass_t dc; for (dc = bus->devclass; dc != NULL; dc = dc->parent) { if (devclass_find_driver_internal(dc, driver) != NULL) return (true); } return (false); } static void device_gen_nomatch(device_t dev) { device_t child; if (dev->flags & DF_NEEDNOMATCH && dev->state == DS_NOTPRESENT) { device_handle_nomatch(dev); } dev->flags &= ~DF_NEEDNOMATCH; TAILQ_FOREACH(child, &dev->children, link) { device_gen_nomatch(child); } } static void device_do_deferred_actions(void) { devclass_t dc; driverlink_t dl; /* * Walk through the devclasses to find all the drivers we've tagged as * deferred during the freeze and call the driver added routines. They * have already been added to the lists in the background, so the driver * added routines that trigger a probe will have all the right bidders * for the probe auction. */ TAILQ_FOREACH(dc, &devclasses, link) { TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->flags & DL_DEFERRED_PROBE) { devclass_driver_added(dc, dl->driver); dl->flags &= ~DL_DEFERRED_PROBE; } } } /* * We also defer no-match events during a freeze. Walk the tree and * generate all the pent-up events that are still relevant. */ device_gen_nomatch(root_bus); bus_data_generation_update(); } static int device_get_path(device_t dev, const char *locator, struct sbuf *sb) { device_t parent; int error; KASSERT(sb != NULL, ("sb is NULL")); parent = device_get_parent(dev); if (parent == NULL) { error = sbuf_putc(sb, '/'); } else { error = BUS_GET_DEVICE_PATH(parent, dev, locator, sb); if (error == 0) { error = sbuf_error(sb); if (error == 0 && sbuf_len(sb) <= 1) error = EIO; } } sbuf_finish(sb); return (error); } static int devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct devreq *req; device_t dev; int error, old; /* Locate the device to control. */ bus_topo_lock(); req = (struct devreq *)data; switch (cmd) { case DEV_ATTACH: case DEV_DETACH: case DEV_ENABLE: case DEV_DISABLE: case DEV_SUSPEND: case DEV_RESUME: case DEV_SET_DRIVER: case DEV_CLEAR_DRIVER: case DEV_RESCAN: case DEV_DELETE: case DEV_RESET: error = priv_check(td, PRIV_DRIVER); if (error == 0) error = find_device(req, &dev); break; case DEV_FREEZE: case DEV_THAW: error = priv_check(td, PRIV_DRIVER); break; case DEV_GET_PATH: error = find_device(req, &dev); break; default: error = ENOTTY; break; } if (error) { bus_topo_unlock(); return (error); } /* Perform the requested operation. */ switch (cmd) { case DEV_ATTACH: if (device_is_attached(dev)) error = EBUSY; else if (!device_is_enabled(dev)) error = ENXIO; else error = device_probe_and_attach(dev); break; case DEV_DETACH: if (!device_is_attached(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } error = device_detach(dev); break; case DEV_ENABLE: if (device_is_enabled(dev)) { error = EBUSY; break; } /* * If the device has been probed but not attached (e.g. * when it has been disabled by a loader hint), just * attach the device rather than doing a full probe. */ device_enable(dev); if (device_is_alive(dev)) { /* * If the device was disabled via a hint, clear * the hint. 
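 * (For example, a device.hints entry such as hint.uart.1.disabled="1"
 * would have kept the device from attaching; the driver name and unit
 * here are illustrative.)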
*/ if (resource_disabled(dev->driver->name, dev->unit)) resource_unset_value(dev->driver->name, dev->unit, "disabled"); error = device_attach(dev); } else error = device_probe_and_attach(dev); break; case DEV_DISABLE: if (!device_is_enabled(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } /* * Force DF_FIXEDCLASS on around detach to preserve * the existing name. */ old = dev->flags; dev->flags |= DF_FIXEDCLASS; error = device_detach(dev); if (!(old & DF_FIXEDCLASS)) dev->flags &= ~DF_FIXEDCLASS; if (error == 0) device_disable(dev); break; case DEV_SUSPEND: if (device_is_suspended(dev)) { error = EBUSY; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev); break; case DEV_RESUME: if (!device_is_suspended(dev)) { error = EINVAL; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_RESUME_CHILD(device_get_parent(dev), dev); break; case DEV_SET_DRIVER: { devclass_t dc; char driver[128]; error = copyinstr(req->dr_data, driver, sizeof(driver), NULL); if (error) break; if (driver[0] == '\0') { error = EINVAL; break; } if (dev->devclass != NULL && strcmp(driver, dev->devclass->name) == 0) /* XXX: Could possibly force DF_FIXEDCLASS on? */ break; /* * Scan drivers for this device's bus looking for at * least one matching driver. */ if (dev->parent == NULL) { error = EINVAL; break; } if (!driver_exists(dev->parent, driver)) { error = ENOENT; break; } dc = devclass_create(driver); if (dc == NULL) { error = ENOMEM; break; } /* Detach device if necessary. */ if (device_is_attached(dev)) { if (req->dr_flags & DEVF_SET_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } /* Clear any previously-fixed device class and unit. */ if (dev->flags & DF_FIXEDCLASS) devclass_delete_device(dev->devclass, dev); dev->flags |= DF_WILDCARD; dev->unit = -1; /* Force the new device class. 
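 * (This path is reached from userland via devctl(8), e.g.
 * "devctl set driver -f foo0 bar"; the device and driver names are
 * made up here.)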
*/ error = devclass_add_device(dc, dev); if (error) break; dev->flags |= DF_FIXEDCLASS; error = device_probe_and_attach(dev); break; } case DEV_CLEAR_DRIVER: if (!(dev->flags & DF_FIXEDCLASS)) { error = 0; break; } if (device_is_attached(dev)) { if (req->dr_flags & DEVF_CLEAR_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } dev->flags &= ~DF_FIXEDCLASS; dev->flags |= DF_WILDCARD; devclass_delete_device(dev->devclass, dev); error = device_probe_and_attach(dev); break; case DEV_RESCAN: if (!device_is_attached(dev)) { error = ENXIO; break; } error = BUS_RESCAN(dev); break; case DEV_DELETE: { device_t parent; parent = device_get_parent(dev); if (parent == NULL) { error = EINVAL; break; } if (!(req->dr_flags & DEVF_FORCE_DELETE)) { if (bus_child_present(dev) != 0) { error = EBUSY; break; } } error = device_delete_child(parent, dev); break; } case DEV_FREEZE: if (device_frozen) error = EBUSY; else device_frozen = true; break; case DEV_THAW: if (!device_frozen) error = EBUSY; else { device_do_deferred_actions(); device_frozen = false; } break; case DEV_RESET: if ((req->dr_flags & ~(DEVF_RESET_DETACH)) != 0) { error = EINVAL; break; } error = BUS_RESET_CHILD(device_get_parent(dev), dev, req->dr_flags); break; case DEV_GET_PATH: { struct sbuf *sb; char locator[64]; ssize_t len; error = copyinstr(req->dr_buffer.buffer, locator, sizeof(locator), NULL); if (error != 0) break; sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND | SBUF_INCLUDENUL /* | SBUF_WAITOK */); error = device_get_path(dev, locator, sb); if (error == 0) { len = sbuf_len(sb); if (req->dr_buffer.length < len) { error = ENAMETOOLONG; } else { error = copyout(sbuf_data(sb), req->dr_buffer.buffer, len); } req->dr_buffer.length = len; } sbuf_delete(sb); break; } } bus_topo_unlock(); return (error); } static struct cdevsw devctl2_cdevsw = { .d_version = D_VERSION, .d_ioctl = devctl2_ioctl, .d_name = "devctl2", }; static void devctl2_init(void) { make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0644, "devctl2"); } /* * For maintaining device 'at' location info to avoid recomputing it */ struct device_location_node { const char *dln_locator; const char *dln_path; TAILQ_ENTRY(device_location_node) dln_link; }; typedef TAILQ_HEAD(device_location_list, device_location_node) device_location_list_t; struct device_location_cache { device_location_list_t dlc_list; }; /* * Location cache for wired devices. 
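 *
 * A hedged usage sketch (the 'at' string is hypothetical; the expected
 * format is "locator:path", as parsed by dev_wired_cache_match()
 * below):
 *
 *	device_location_cache_t *cache = dev_wired_cache_init();
 *	if (dev_wired_cache_match(cache, dev, "ACPI:\_SB_.PCI0"))
 *		wired = true;	   (the device sits at that path)
 *	dev_wired_cache_fini(cache);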
*/ device_location_cache_t * dev_wired_cache_init(void) { device_location_cache_t *dcp; dcp = malloc(sizeof(*dcp), M_BUS, M_WAITOK | M_ZERO); TAILQ_INIT(&dcp->dlc_list); return (dcp); } void dev_wired_cache_fini(device_location_cache_t *dcp) { struct device_location_node *dln, *tdln; TAILQ_FOREACH_SAFE(dln, &dcp->dlc_list, dln_link, tdln) { free(dln, M_BUS); } free(dcp, M_BUS); } static struct device_location_node * dev_wired_cache_lookup(device_location_cache_t *dcp, const char *locator) { struct device_location_node *dln; TAILQ_FOREACH(dln, &dcp->dlc_list, dln_link) { if (strcmp(locator, dln->dln_locator) == 0) return (dln); } return (NULL); } static struct device_location_node * dev_wired_cache_add(device_location_cache_t *dcp, const char *locator, const char *path) { struct device_location_node *dln; size_t loclen, pathlen; loclen = strlen(locator) + 1; pathlen = strlen(path) + 1; dln = malloc(sizeof(*dln) + loclen + pathlen, M_BUS, M_WAITOK | M_ZERO); dln->dln_locator = (char *)(dln + 1); memcpy(__DECONST(char *, dln->dln_locator), locator, loclen); dln->dln_path = dln->dln_locator + loclen; memcpy(__DECONST(char *, dln->dln_path), path, pathlen); TAILQ_INSERT_HEAD(&dcp->dlc_list, dln, dln_link); return (dln); } bool dev_wired_cache_match(device_location_cache_t *dcp, device_t dev, const char *at) { struct sbuf *sb; const char *cp; char locator[32]; int error, len; struct device_location_node *res; cp = strchr(at, ':'); if (cp == NULL) return (false); len = cp - at; if (len > sizeof(locator) - 1) /* Skip too long locator */ return (false); memcpy(locator, at, len); locator[len] = '\0'; cp++; error = 0; /* maybe cache this inside device_t and look that up, but not yet */ res = dev_wired_cache_lookup(dcp, locator); if (res == NULL) { sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND | SBUF_INCLUDENUL | SBUF_NOWAIT); if (sb != NULL) { error = device_get_path(dev, locator, sb); if (error == 0) { res = dev_wired_cache_add(dcp, locator, sbuf_data(sb)); } sbuf_delete(sb); } } if (error != 0 || res == NULL || res->dln_path == NULL) return (false); return (strcmp(res->dln_path, cp) == 0); } /* * APIs to manage deprecation and obsolescence. 
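 *
 * For example (a sketch; the release number and driver name are made
 * up), a driver slated for removal announces itself from its attach
 * routine:
 *
 *	gone_in_dev(dev, 16, "foo(4) driver");
 *
 * which prints the deprecation notice via _gone_in_dev() below, or
 * panics when the debug.obsolete_panic tunable asks for that.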
*/ static int obsolete_panic = 0; SYSCTL_INT(_debug, OID_AUTO, obsolete_panic, CTLFLAG_RWTUN, &obsolete_panic, 0, "Panic when obsolete features are used (0 = never, 1 = if obsolete, " "2 = if deprecated)"); static void gone_panic(int major, int running, const char *msg) { switch (obsolete_panic) { case 0: return; case 1: if (running < major) return; /* FALLTHROUGH */ default: panic("%s", msg); } } void _gone_in(int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) printf("Obsolete code will be removed soon: %s\n", msg); else printf("Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); } void _gone_in_dev(device_t dev, int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) device_printf(dev, "Obsolete code will be removed soon: %s\n", msg); else device_printf(dev, "Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); } #ifdef DDB DB_SHOW_COMMAND(device, db_show_device) { device_t dev; if (!have_addr) return; dev = (device_t)addr; db_printf("name: %s\n", device_get_nameunit(dev)); db_printf(" driver: %s\n", DRIVERNAME(dev->driver)); db_printf(" class: %s\n", DEVCLANAME(dev->devclass)); db_printf(" addr: %p\n", dev); db_printf(" parent: %p\n", dev->parent); db_printf(" softc: %p\n", dev->softc); db_printf(" ivars: %p\n", dev->ivars); } DB_SHOW_ALL_COMMAND(devices, db_show_all_devices) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { db_show_device((db_expr_t)dev, true, count, modif); } } #endif diff --git a/sys/powerpc/mpc85xx/lbc.c b/sys/powerpc/mpc85xx/lbc.c index f6f38f22beb6..afac89b7597a 100644 --- a/sys/powerpc/mpc85xx/lbc.c +++ b/sys/powerpc/mpc85xx/lbc.c @@ -1,943 +1,939 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2006-2008, Juniper Networks, Inc. * Copyright (c) 2008 Semihalf, Rafal Czubak * Copyright (c) 2009 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "lbc.h" #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif static MALLOC_DEFINE(M_LBC, "localbus", "localbus devices information"); static int lbc_probe(device_t); static int lbc_attach(device_t); static int lbc_shutdown(device_t); static int lbc_map_resource(device_t, device_t, int, struct resource *, struct resource_map_request *, struct resource_map *); static int lbc_unmap_resource(device_t, device_t, int, struct resource *, struct resource_map *map); static int lbc_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r); static int lbc_deactivate_resource(device_t bus, device_t child, int type __unused, int rid, struct resource *r); static struct rman *lbc_get_rman(device_t, int, u_int); static struct resource *lbc_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int lbc_adjust_resource(device_t, device_t, int, struct resource *, +static int lbc_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int lbc_print_child(device_t, device_t); static int lbc_release_resource(device_t, device_t, int, int, struct resource *); static const struct ofw_bus_devinfo *lbc_get_devinfo(device_t, device_t); /* * Bus interface definition */ static device_method_t lbc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lbc_probe), DEVMETHOD(device_attach, lbc_attach), DEVMETHOD(device_shutdown, lbc_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, lbc_print_child), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, NULL), DEVMETHOD(bus_get_rman, lbc_get_rman), DEVMETHOD(bus_alloc_resource, lbc_alloc_resource), DEVMETHOD(bus_adjust_resource, lbc_adjust_resource), DEVMETHOD(bus_release_resource, lbc_release_resource), DEVMETHOD(bus_activate_resource, lbc_activate_resource), DEVMETHOD(bus_deactivate_resource, lbc_deactivate_resource), DEVMETHOD(bus_map_resource, lbc_map_resource), DEVMETHOD(bus_unmap_resource, lbc_unmap_resource), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_devinfo, lbc_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t lbc_driver = { "lbc", lbc_methods, sizeof(struct lbc_softc) }; EARLY_DRIVER_MODULE(lbc, ofwbus, lbc_driver, 0, 0, BUS_PASS_BUS); /* * Calculate address mask used by OR(n) registers. Use memory region size to * determine mask value. The size must be a power of two and within the range * of 32KB - 4GB. Otherwise an error code is returned. Value representing * 4GB size can be passed as 0xffffffff.
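 *
 * For example, a 32KB bank (size == 1 << 15) yields the minimum mask
 * 0xffff8000, a 64KB bank (size == 1 << 16) gives 0xffff8000 << 1 ==
 * 0xffff0000, and a full 4GB bank, passed as 0xffffffff, yields a
 * mask of 0.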
*/ static uint32_t lbc_address_mask(uint32_t size) { int n = 15; if (size == ~0) return (0); while (n < 32) { if (size == (1U << n)) break; n++; } if (n == 32) return (EINVAL); return (0xffff8000 << (n - 15)); } static void lbc_banks_unmap(struct lbc_softc *sc) { int r; r = 0; while (r < LBC_DEV_MAX) { if (sc->sc_range[r].size == 0) return; pmap_unmapdev((void *)sc->sc_range[r].kva, sc->sc_range[r].size); law_disable(OCP85XX_TGTIF_LBC, sc->sc_range[r].addr, sc->sc_range[r].size); r++; } } static int lbc_banks_map(struct lbc_softc *sc) { vm_paddr_t end, start; vm_size_t size; u_int i, r, ranges, s; int error; bzero(sc->sc_range, sizeof(sc->sc_range)); /* * Determine number of discontiguous address ranges to program. */ ranges = 0; for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; start = sc->sc_banks[i].addr; for (r = 0; r < ranges; r++) { /* Avoid wrap-around bugs. */ end = sc->sc_range[r].addr - 1 + sc->sc_range[r].size; if (start > 0 && end == start - 1) { sc->sc_range[r].size += size; break; } /* Avoid wrap-around bugs. */ end = start - 1 + size; if (sc->sc_range[r].addr > 0 && end == sc->sc_range[r].addr - 1) { sc->sc_range[r].addr = start; sc->sc_range[r].size += size; break; } } if (r == ranges) { /* New range; add using insertion sort */ r = 0; while (r < ranges && sc->sc_range[r].addr < start) r++; for (s = ranges; s > r; s--) sc->sc_range[s] = sc->sc_range[s-1]; sc->sc_range[r].addr = start; sc->sc_range[r].size = size; ranges++; } } /* * Ranges are sorted so quickly go over the list to merge ranges * that grew toward each other while building the ranges. */ r = 0; while (r < ranges - 1) { end = sc->sc_range[r].addr + sc->sc_range[r].size; if (end != sc->sc_range[r+1].addr) { r++; continue; } sc->sc_range[r].size += sc->sc_range[r+1].size; for (s = r + 1; s < ranges - 1; s++) sc->sc_range[s] = sc->sc_range[s+1]; bzero(&sc->sc_range[s], sizeof(sc->sc_range[s])); ranges--; } /* * Configure LAW for the LBC ranges and map the physical memory * range into KVA. */ for (r = 0; r < ranges; r++) { start = sc->sc_range[r].addr; size = sc->sc_range[r].size; error = law_enable(OCP85XX_TGTIF_LBC, start, size); if (error) return (error); sc->sc_range[r].kva = (vm_offset_t)pmap_mapdev(start, size); } /* XXX: need something better here? */ if (ranges == 0) return (EINVAL); /* Assign KVA to banks based on the enclosing range. */ for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; start = sc->sc_banks[i].addr; for (r = 0; r < ranges; r++) { end = sc->sc_range[r].addr - 1 + sc->sc_range[r].size; if (start >= sc->sc_range[r].addr && start - 1 + size <= end) break; } if (r < ranges) { sc->sc_banks[i].kva = sc->sc_range[r].kva + (start - sc->sc_range[r].addr); } } return (0); } static int lbc_banks_enable(struct lbc_softc *sc) { uint32_t size; uint32_t regval; int error, i; for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; /* * Compute and program BR value. */ regval = sc->sc_banks[i].addr; switch (sc->sc_banks[i].width) { case 8: regval |= (1 << 11); break; case 16: regval |= (2 << 11); break; case 32: regval |= (3 << 11); break; default: error = EINVAL; goto fail; } regval |= (sc->sc_banks[i].decc << 9); regval |= (sc->sc_banks[i].wp << 8); regval |= (sc->sc_banks[i].msel << 5); regval |= (sc->sc_banks[i].atom << 2); regval |= 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_BR(i), regval); /* * Compute and program OR value. 
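 * For a hypothetical 64KB GPCM bank this works out to
 * 0xffff0000 | 0x0ff7: the address mask from lbc_address_mask() in
 * the high bits plus the fixed option defaults chosen below.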
*/ regval = lbc_address_mask(size); switch (sc->sc_banks[i].msel) { case LBCRES_MSEL_GPCM: /* TODO Add flag support for option registers */ regval |= 0x0ff7; break; case LBCRES_MSEL_FCM: /* TODO Add flag support for options register */ regval |= 0x0796; break; case LBCRES_MSEL_UPMA: case LBCRES_MSEL_UPMB: case LBCRES_MSEL_UPMC: printf("UPM mode not supported yet!"); error = ENOSYS; goto fail; } bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_OR(i), regval); } return (0); fail: lbc_banks_unmap(sc); return (error); } static void fdt_lbc_fixup(phandle_t node, struct lbc_softc *sc, struct lbc_devinfo *di) { pcell_t width; int bank; if (OF_getprop(node, "bank-width", (void *)&width, sizeof(width)) <= 0) return; bank = di->di_bank; if (sc->sc_banks[bank].size == 0) return; /* Express width in bits. */ sc->sc_banks[bank].width = width * 8; } static int fdt_lbc_reg_decode(phandle_t node, struct lbc_softc *sc, struct lbc_devinfo *di) { rman_res_t start, end, count; pcell_t *reg, *regptr; pcell_t addr_cells, size_cells; int tuple_size, tuples; int i, j, rv, bank; if (fdt_addrsize_cells(OF_parent(node), &addr_cells, &size_cells) != 0) return (ENXIO); tuple_size = sizeof(pcell_t) * (addr_cells + size_cells); tuples = OF_getencprop_alloc_multi(node, "reg", tuple_size, (void **)®); debugf("addr_cells = %d, size_cells = %d\n", addr_cells, size_cells); debugf("tuples = %d, tuple size = %d\n", tuples, tuple_size); if (tuples <= 0) /* No 'reg' property in this node. */ return (0); regptr = reg; for (i = 0; i < tuples; i++) { bank = fdt_data_get((void *)reg, 1); di->di_bank = bank; reg += 1; /* Get address/size. */ start = count = 0; for (j = 0; j < addr_cells - 1; j++) { start <<= 32; start |= reg[j]; } for (j = 0; j < size_cells; j++) { count <<= 32; count |= reg[addr_cells + j - 1]; } reg += addr_cells - 1 + size_cells; /* Calculate address range relative to VA base. */ start = sc->sc_banks[bank].kva + start; end = start + count - 1; debugf("reg addr bank = %d, start = %jx, end = %jx, " "count = %jx\n", bank, start, end, count); /* Use bank (CS) cell as rid. 
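 * (so a child sitting on chip select 2, say, sees its memory window
 * under rid 2; the bank number here is illustrative)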
*/ resource_list_add(&di->di_res, SYS_RES_MEMORY, bank, start, end, count); } rv = 0; OF_prop_free(regptr); return (rv); } static void lbc_intr(void *arg) { struct lbc_softc *sc = arg; uint32_t ltesr; ltesr = bus_space_read_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR); sc->sc_ltesr = ltesr; bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR, ltesr); wakeup(sc->sc_dev); } static int lbc_probe(device_t dev) { if (!(ofw_bus_is_compatible(dev, "fsl,lbc") || ofw_bus_is_compatible(dev, "fsl,elbc"))) return (ENXIO); device_set_desc(dev, "Freescale Local Bus Controller"); return (BUS_PROBE_DEFAULT); } static int lbc_attach(device_t dev) { struct lbc_softc *sc; struct lbc_devinfo *di; struct rman *rm; uintmax_t offset, size; vm_paddr_t start; device_t cdev; phandle_t node, child; pcell_t *ranges, *rangesptr; int tuple_size, tuples; int par_addr_cells; int bank, error, i, j; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_mrid = 0; sc->sc_mres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mrid, RF_ACTIVE); if (sc->sc_mres == NULL) return (ENXIO); sc->sc_bst = rman_get_bustag(sc->sc_mres); sc->sc_bsh = rman_get_bushandle(sc->sc_mres); for (bank = 0; bank < LBC_DEV_MAX; bank++) { bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_BR(bank), 0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_OR(bank), 0); } /* * Initialize configuration register: * - enable Local Bus * - set data buffer control signal function * - disable parity byte select * - set ECC parity type * - set bus monitor timing and timer prescale */ bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LBCR, 0); /* * Initialize clock ratio register: * - disable PLL bypass mode * - configure LCLK delay cycles for the assertion of LALE * - set system clock divider */ bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LCRR, 0x00030008); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTEDR, 0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR, ~0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTEIR, 0x64080001); sc->sc_irid = 0; sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_MISC | INTR_MPSAFE, NULL, lbc_intr, sc, &sc->sc_icookie); if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } sc->sc_ltesr = ~0; rangesptr = NULL; rm = &sc->sc_rman; rm->rm_type = RMAN_ARRAY; rm->rm_descr = "Local Bus Space"; error = rman_init(rm); if (error) goto fail; error = rman_manage_region(rm, rm->rm_start, rm->rm_end); if (error) { rman_fini(rm); goto fail; } /* * Process 'ranges' property. 
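 *
 * As a hedged illustration (a made-up DTS fragment; actual layouts
 * depend on #address-cells and #size-cells), a NOR flash on chip
 * select 0 could be described as
 *
 *	ranges = <0x0 0x0 0xfc000000 0x04000000>;
 *
 * i.e. bank 0, offset 0 into the chip select, parent address
 * 0xfc000000, size 64MB, which the loop below decodes into
 * sc->sc_banks[0].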
*/ node = ofw_bus_get_node(dev); if ((fdt_addrsize_cells(node, &sc->sc_addr_cells, &sc->sc_size_cells)) != 0) { error = ENXIO; goto fail; } par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 2) { device_printf(dev, "unsupported parent #addr-cells\n"); error = ERANGE; goto fail; } tuple_size = sizeof(pcell_t) * (sc->sc_addr_cells + par_addr_cells + sc->sc_size_cells); tuples = OF_getencprop_alloc_multi(node, "ranges", tuple_size, (void **)&ranges); if (tuples < 0) { device_printf(dev, "could not retrieve 'ranges' property\n"); error = ENXIO; goto fail; } rangesptr = ranges; debugf("par addr_cells = %d, addr_cells = %d, size_cells = %d, " "tuple_size = %d, tuples = %d\n", par_addr_cells, sc->sc_addr_cells, sc->sc_size_cells, tuple_size, tuples); start = 0; size = 0; for (i = 0; i < tuples; i++) { /* The first cell is the bank (chip select) number. */ bank = fdt_data_get(ranges, 1); if (bank < 0 || bank > LBC_DEV_MAX) { device_printf(dev, "bank out of range: %d\n", bank); error = ERANGE; goto fail; } ranges += 1; /* * Remaining cells of the child address define offset into * this CS. */ offset = 0; for (j = 0; j < sc->sc_addr_cells - 1; j++) { offset <<= sizeof(pcell_t) * 8; offset |= *ranges; ranges++; } /* Parent bus start address of this bank. */ start = 0; for (j = 0; j < par_addr_cells; j++) { start <<= sizeof(pcell_t) * 8; start |= *ranges; ranges++; } size = fdt_data_get((void *)ranges, sc->sc_size_cells); ranges += sc->sc_size_cells; debugf("bank = %d, start = %jx, size = %jx\n", bank, (uintmax_t)start, size); sc->sc_banks[bank].addr = start + offset; sc->sc_banks[bank].size = size; /* * Attributes for the bank. * * XXX Note there are no DT bindings defined for them at the * moment, so we need to provide some defaults. */ sc->sc_banks[bank].width = 16; sc->sc_banks[bank].msel = LBCRES_MSEL_GPCM; sc->sc_banks[bank].decc = LBCRES_DECC_DISABLED; sc->sc_banks[bank].atom = LBCRES_ATOM_DISABLED; sc->sc_banks[bank].wp = 0; } /* * Initialize mem-mappings for the LBC banks (i.e. chip selects). */ error = lbc_banks_map(sc); if (error) goto fail; /* * Walk the localbus and add direct subordinates as our children. */ for (child = OF_child(node); child != 0; child = OF_peer(child)) { di = malloc(sizeof(*di), M_LBC, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_ofw, child) != 0) { free(di, M_LBC); device_printf(dev, "could not set up devinfo\n"); continue; } resource_list_init(&di->di_res); if (fdt_lbc_reg_decode(child, sc, di)) { device_printf(dev, "could not process 'reg' " "property\n"); ofw_bus_gen_destroy_devinfo(&di->di_ofw); free(di, M_LBC); continue; } fdt_lbc_fixup(child, sc, di); /* Add newbus device for this FDT node */ cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "could not add child: %s\n", di->di_ofw.obd_name); resource_list_free(&di->di_res); ofw_bus_gen_destroy_devinfo(&di->di_ofw); free(di, M_LBC); continue; } debugf("added child name='%s', node=%x\n", di->di_ofw.obd_name, child); device_set_ivars(cdev, di); } /* * Enable the LBC. 
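 * By this point every bank has a mapped window and a full set of
 * attributes, so the controller can be switched on before the children
 * attach.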
*/ lbc_banks_enable(sc); OF_prop_free(rangesptr); return (bus_generic_attach(dev)); fail: OF_prop_free(rangesptr); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mrid, sc->sc_mres); return (error); } static int lbc_shutdown(device_t dev) { /* TODO */ return(0); } static struct rman * lbc_get_rman(device_t bus, int type, u_int flags) { struct lbc_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: return (&sc->sc_rman); default: return (NULL); } } static struct resource * lbc_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct lbc_devinfo *di; struct resource_list_entry *rle; /* We only support default allocations. */ if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); if (type == SYS_RES_IRQ) return (bus_alloc_resource(bus, type, rid, start, end, count, flags)); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. */ if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; /* * XXX: We are supposed to return a value to the user, so this * doesn't seem right. */ rid = &di->di_bank; rle = resource_list_find(&di->di_res, type, *rid); if (rle == NULL) { device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; count = rle->count; end = start + count - 1; return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int lbc_print_child(device_t dev, device_t child) { struct lbc_devinfo *di; struct resource_list *rl; int rv; di = device_get_ivars(child); rl = &di->di_res; rv = 0; rv += bus_print_child_header(dev, child); rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); rv += bus_print_child_footer(dev, child); return (rv); } static int -lbc_adjust_resource(device_t dev, device_t child, int type, struct resource *r, +lbc_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { - switch (type) { - case SYS_RES_IOPORT: - type = SYS_RES_MEMORY; - /* FALLTHROUGH */ + switch (rman_get_type(r)) { case SYS_RES_MEMORY: - return (bus_generic_rman_adjust_resource(dev, child, type, r, - start, end)); - case SYS_RES_IRQ: - return (bus_generic_adjust_resource(dev, child, type, r, start, + return (bus_generic_rman_adjust_resource(dev, child, r, start, end)); + case SYS_RES_IRQ: + return (bus_generic_adjust_resource(dev, child, r, start, end)); default: return (EINVAL); } } static int lbc_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: type = SYS_RES_MEMORY; /* FALLTHROUGH */ case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(dev, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_release_resource(dev, child, type, rid, res)); default: return (EINVAL); } } static int lbc_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_IOPORT: type = SYS_RES_MEMORY; /* FALLTHROUGH */ case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(bus, child, type, rid, r)); case SYS_RES_IRQ: return (bus_generic_activate_resource(bus, child, type, rid, r)); default: return (EINVAL); } } static int lbc_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { switch (type) { case 
SYS_RES_IOPORT: type = SYS_RES_MEMORY; /* FALLTHROUGH */ case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(bus, child, type, rid, r)); case SYS_RES_IRQ: return (bus_generic_deactivate_resource(bus, child, type, rid, r)); default: return (EINVAL); } } static int lbc_map_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_bustag = &bs_be_tag; map->r_bushandle = start; map->r_size = length; map->r_vaddr = NULL; return (0); } static int lbc_unmap_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map *map) { /* Mappings are only supported on I/O and memory resources. */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } return (0); } static const struct ofw_bus_devinfo * lbc_get_devinfo(device_t bus, device_t child) { struct lbc_devinfo *di; di = device_get_ivars(child); return (&di->di_ofw); } void lbc_write_reg(device_t child, u_int off, uint32_t val) { device_t dev; struct lbc_softc *sc; dev = device_get_parent(child); if (off >= 0x1000) { device_printf(dev, "%s(%s): invalid offset %#x\n", __func__, device_get_nameunit(child), off); return; } sc = device_get_softc(dev); if (off == LBC85XX_LTESR && sc->sc_ltesr != ~0u) { sc->sc_ltesr ^= (val & sc->sc_ltesr); return; } if (off == LBC85XX_LTEATR && (val & 1) == 0) sc->sc_ltesr = ~0u; bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val); } uint32_t lbc_read_reg(device_t child, u_int off) { device_t dev; struct lbc_softc *sc; uint32_t val; dev = device_get_parent(child); if (off >= 0x1000) { device_printf(dev, "%s(%s): invalid offset %#x\n", __func__, device_get_nameunit(child), off); return (~0U); } sc = device_get_softc(dev); if (off == LBC85XX_LTESR && sc->sc_ltesr != ~0U) val = sc->sc_ltesr; else val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); return (val); } diff --git a/sys/powerpc/powermac/macio.c b/sys/powerpc/powermac/macio.c index f6ad815064e8..b443f277ec89 100644 --- a/sys/powerpc/powermac/macio.c +++ b/sys/powerpc/powermac/macio.c @@ -1,801 +1,800 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for KeyLargo/Pangea, the MacPPC south bridge ASIC. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Macio softc */ struct macio_softc { phandle_t sc_node; vm_offset_t sc_base; vm_offset_t sc_size; struct rman sc_mem_rman; /* FCR registers */ int sc_memrid; struct resource *sc_memr; /* GPIO offsets */ int sc_timebase; }; static MALLOC_DEFINE(M_MACIO, "macio", "macio device information"); static int macio_probe(device_t); static int macio_attach(device_t); static int macio_print_child(device_t dev, device_t child); static void macio_probe_nomatch(device_t, device_t); static struct rman *macio_get_rman(device_t, int, u_int); static struct resource *macio_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int macio_adjust_resource(device_t, device_t, int, struct resource *, +static int macio_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int macio_activate_resource(device_t, device_t, int, int, struct resource *); static int macio_deactivate_resource(device_t, device_t, int, int, struct resource *); static int macio_release_resource(device_t, device_t, int, int, struct resource *); static int macio_map_resource(device_t, device_t, int, struct resource *, struct resource_map_request *, struct resource_map *); static int macio_unmap_resource(device_t, device_t, int, struct resource *, struct resource_map *); static struct resource_list *macio_get_resource_list (device_t, device_t); static ofw_bus_get_devinfo_t macio_get_devinfo; #if !defined(__powerpc64__) && defined(SMP) static void macio_freeze_timebase(device_t, bool); #endif /* * Bus interface definition */ static device_method_t macio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, macio_probe), DEVMETHOD(device_attach, macio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, macio_print_child), DEVMETHOD(bus_probe_nomatch, macio_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, macio_get_rman), DEVMETHOD(bus_alloc_resource, macio_alloc_resource), DEVMETHOD(bus_adjust_resource, macio_adjust_resource), DEVMETHOD(bus_release_resource, macio_release_resource), DEVMETHOD(bus_activate_resource, macio_activate_resource), DEVMETHOD(bus_deactivate_resource, macio_deactivate_resource), DEVMETHOD(bus_map_resource, macio_map_resource), DEVMETHOD(bus_unmap_resource, macio_unmap_resource), DEVMETHOD(bus_get_resource_list, macio_get_resource_list), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, macio_get_devinfo), DEVMETHOD(ofw_bus_get_compat, 
ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t macio_pci_driver = { "macio", macio_methods, sizeof(struct macio_softc) }; EARLY_DRIVER_MODULE(macio, pci, macio_pci_driver, 0, 0, BUS_PASS_BUS); /* * PCI ID search table */ static struct macio_pci_dev { u_int32_t mpd_devid; char *mpd_desc; } macio_pci_devlist[] = { { 0x0017106b, "Paddington I/O Controller" }, { 0x0022106b, "KeyLargo I/O Controller" }, { 0x0025106b, "Pangea I/O Controller" }, { 0x003e106b, "Intrepid I/O Controller" }, { 0x0041106b, "K2 KeyLargo I/O Controller" }, { 0x004f106b, "Shasta I/O Controller" }, { 0, NULL } }; /* * Devices to exclude from the probe * XXX some of these may be required in the future... */ #define MACIO_QUIRK_IGNORE 0x00000001 #define MACIO_QUIRK_CHILD_HAS_INTR 0x00000002 #define MACIO_QUIRK_USE_CHILD_REG 0x00000004 struct macio_quirk_entry { const char *mq_name; int mq_quirks; }; static struct macio_quirk_entry macio_quirks[] = { { "escc-legacy", MACIO_QUIRK_IGNORE }, { "timer", MACIO_QUIRK_IGNORE }, { "escc", MACIO_QUIRK_CHILD_HAS_INTR }, { "i2s", MACIO_QUIRK_CHILD_HAS_INTR | MACIO_QUIRK_USE_CHILD_REG }, { NULL, 0 } }; static int macio_get_quirks(const char *name) { struct macio_quirk_entry *mqe; for (mqe = macio_quirks; mqe->mq_name != NULL; mqe++) if (strcmp(name, mqe->mq_name) == 0) return (mqe->mq_quirks); return (0); } /* * Add an interrupt to the dev's resource list if present */ static void macio_add_intr(phandle_t devnode, struct macio_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->mdi_ninterrupts >= 6) { printf("macio: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc_multi(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc_multi(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_getprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, irq, irq, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = irq; dinfo->mdi_ninterrupts++; } } static void macio_add_reg(phandle_t devnode, struct macio_devinfo *dinfo) { struct macio_reg *reg, *regp; phandle_t child; char buf[8]; int i, layout_id = 0, nreg, res; nreg = OF_getprop_alloc_multi(devnode, "reg", sizeof(*reg), (void **)®); if (nreg == -1) return; /* * Some G5's have broken properties in the i2s-a area. If so we try * to fix it. Right now we know of two different cases, one for * sound layout-id 36 and the other one for sound layout-id 76. * What is missing is the base address for the memory addresses. * We take them from the parent node (i2s) and use the size * information from the child. 
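 * The fixup below only fires when the first reg entry has a zero base,
 * which is how the affected nodes present themselves.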
*/ if (reg[0].mr_base == 0) { child = OF_child(devnode); while (child != 0) { res = OF_getprop(child, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "sound") == 0) break; child = OF_peer(child); } res = OF_getprop(child, "layout-id", &layout_id, sizeof(layout_id)); if (res > 0 && (layout_id == 36 || layout_id == 76)) { res = OF_getprop_alloc_multi(OF_parent(devnode), "reg", sizeof(*regp), (void **)®p); reg[0] = regp[0]; reg[1].mr_base = regp[1].mr_base; reg[2].mr_base = regp[1].mr_base + reg[1].mr_size; } } for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->mdi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } } /* * PCI probe */ static int macio_probe(device_t dev) { int i; u_int32_t devid; devid = pci_get_devid(dev); for (i = 0; macio_pci_devlist[i].mpd_desc != NULL; i++) { if (devid == macio_pci_devlist[i].mpd_devid) { device_set_desc(dev, macio_pci_devlist[i].mpd_desc); return (0); } } return (ENXIO); } /* * PCI attach: scan Open Firmware child nodes, and attach these as children * of the macio bus */ static int macio_attach(device_t dev) { struct macio_softc *sc; struct macio_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t subchild; device_t cdev; u_int reg[3]; char compat[32]; int error, quirks; sc = device_get_softc(dev); root = sc->sc_node = ofw_bus_get_node(dev); /* * Locate the device node and it's base address */ if (OF_getprop(root, "assigned-addresses", reg, sizeof(reg)) < (ssize_t)sizeof(reg)) { return (ENXIO); } /* Used later to see if we have to enable the I2S part. */ OF_getprop(root, "compatible", compat, sizeof(compat)); sc->sc_base = reg[2]; sc->sc_size = MACIO_REG_SIZE; sc->sc_memrid = PCIR_BAR(0); sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_memrid, RF_ACTIVE); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "MacIO Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); if (error) { device_printf(dev, "rman_manage_region() failed. 
error = %d\n", error); return (error); } /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_MACIO, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_MACIO); continue; } quirks = macio_get_quirks(dinfo->mdi_obdinfo.obd_name); if ((quirks & MACIO_QUIRK_IGNORE) != 0) { ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } resource_list_init(&dinfo->mdi_resources); dinfo->mdi_ninterrupts = 0; macio_add_intr(child, dinfo); if ((quirks & MACIO_QUIRK_USE_CHILD_REG) != 0) macio_add_reg(OF_child(child), dinfo); else macio_add_reg(child, dinfo); if ((quirks & MACIO_QUIRK_CHILD_HAS_INTR) != 0) for (subchild = OF_child(child); subchild != 0; subchild = OF_peer(subchild)) macio_add_intr(subchild, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); resource_list_free(&dinfo->mdi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } device_set_ivars(cdev, dinfo); /* Set FCRs to enable some devices */ if (sc->sc_memr == NULL) continue; if (strcmp(ofw_bus_get_name(cdev), "bmac") == 0 || (ofw_bus_get_compat(cdev) != NULL && strcmp(ofw_bus_get_compat(cdev), "bmac+") == 0)) { uint32_t fcr; fcr = bus_read_4(sc->sc_memr, HEATHROW_FCR); fcr |= FCR_ENET_ENABLE & ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr |= FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr &= ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); } /* * Make sure the I2S0 and the I2S0_CLK are enabled. * On certain G5's they are not. */ if ((strcmp(ofw_bus_get_name(cdev), "i2s") == 0) && (strcmp(compat, "K2-Keylargo") == 0)) { uint32_t fcr1; fcr1 = bus_read_4(sc->sc_memr, KEYLARGO_FCR1); fcr1 |= FCR1_I2S0_CLK_ENABLE | FCR1_I2S0_ENABLE; bus_write_4(sc->sc_memr, KEYLARGO_FCR1, fcr1); } } #if !defined(__powerpc64__) && defined(SMP) /* * Detect an SMP G4 machine. * * On SMP G4, timebase freeze is via a GPIO on macio. * * When we are on an SMP G4, we need to install a handler to * perform timebase freeze/unfreeze on behalf of the platform. 
*/ if ((child = OF_finddevice("/cpus/PowerPC,G4@0")) != -1 && OF_peer(child) != -1) { if (OF_getprop(child, "timebase-enable", &sc->sc_timebase, sizeof(sc->sc_timebase)) <= 0) sc->sc_timebase = KEYLARGO_GPIO_BASE + 0x09; powermac_register_timebase(dev, macio_freeze_timebase); device_printf(dev, "GPIO timebase control at 0x%x\n", sc->sc_timebase); } #endif return (bus_generic_attach(dev)); } static int macio_print_child(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void macio_probe_nomatch(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct rman * macio_get_rman(device_t bus, int type, u_int flags) { struct macio_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * macio_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t adjstart, adjend, adjcount; struct macio_devinfo *dinfo; struct resource_list_entry *rle; dinfo = device_get_ivars(child); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; return (bus_generic_rman_alloc_resource(bus, child, type, rid, adjstart, adjend, adjcount, flags)); case SYS_RES_IRQ: /* Check for passthrough from subattachments like macgpio */ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->mdi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, start, start, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = start; dinfo->mdi_ninterrupts++; } return (resource_list_alloc(&dinfo->mdi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } } static int -macio_adjust_resource(device_t bus, device_t child, int type, - struct resource *r, rman_res_t start, rman_res_t end) +macio_adjust_resource(device_t bus, device_t child, struct resource *r, + rman_res_t start, rman_res_t end) { - 
switch (type) { + switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_adjust_resource(bus, child, type, r, - start, end)); - case SYS_RES_IRQ: - return (bus_generic_adjust_resource(bus, child, type, r, start, + return (bus_generic_rman_adjust_resource(bus, child, r, start, end)); + case SYS_RES_IRQ: + return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int macio_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_rl_release_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int macio_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_activate_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int macio_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_deactivate_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int macio_map_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct macio_softc *sc; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); if (bootverbose) printf("nexus mapdev: start %jx, len %jd\n", (uintmax_t)start, (uintmax_t)length); sc = device_get_softc(bus); map->r_vaddr = pmap_mapdev_attr((vm_paddr_t)start + sc->sc_base, length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_le_tag; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int macio_unmap_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map *map) { /* * If this is a memory resource, unmap it. */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); break; default: return (EINVAL); } return (0); } static struct resource_list * macio_get_resource_list (device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_resources); } static const struct ofw_bus_devinfo * macio_get_devinfo(device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_obdinfo); } int macio_enable_wireless(device_t dev, bool enable) { struct macio_softc *sc = device_get_softc(dev); uint32_t x; if (enable) { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* Enable card slot. 
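 * Pulse the slot GPIO, clear the high bit of FCR2 while the GPIOs are
 * programmed and the card is initialized, then set it again.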
*/ bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 5); DELAY(1000); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 4); DELAY(1000); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 4); */ bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0b, 0); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0a, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0e, 0x28); bus_write_4(sc->sc_memr, 0x1c000, 0); /* Initialize the card. */ bus_write_4(sc->sc_memr, 0x1a3e0, 0x41); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); } else { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 0); */ } return (0); } #if !defined(__powerpc64__) && defined(SMP) static void macio_freeze_timebase(device_t dev, bool freeze) { struct macio_softc *sc = device_get_softc(dev); if (freeze) { bus_write_1(sc->sc_memr, sc->sc_timebase, 4); } else { bus_write_1(sc->sc_memr, sc->sc_timebase, 0); } bus_read_1(sc->sc_memr, sc->sc_timebase); } #endif diff --git a/sys/powerpc/powermac/uninorth.c b/sys/powerpc/powermac/uninorth.c index b64bbc8ade62..b9cb4814b986 100644 --- a/sys/powerpc/powermac/uninorth.c +++ b/sys/powerpc/powermac/uninorth.c @@ -1,729 +1,728 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2002 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Driver for the Uninorth chip itself. */ static MALLOC_DEFINE(M_UNIN, "unin", "unin device information"); /* * Device interface. */ static int unin_chip_probe(device_t); static int unin_chip_attach(device_t); /* * Bus interface. 
*/ static int unin_chip_print_child(device_t dev, device_t child); static void unin_chip_probe_nomatch(device_t, device_t); static struct rman *unin_chip_get_rman(device_t, int, u_int); static struct resource *unin_chip_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int unin_chip_adjust_resource(device_t, device_t, int, +static int unin_chip_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int unin_chip_activate_resource(device_t, device_t, int, int, struct resource *); static int unin_chip_deactivate_resource(device_t, device_t, int, int, struct resource *); static int unin_chip_map_resource(device_t, device_t, int, struct resource *, struct resource_map_request *, struct resource_map *); static int unin_chip_unmap_resource(device_t, device_t, int, struct resource *, struct resource_map *); static int unin_chip_release_resource(device_t, device_t, int, int, struct resource *); static struct resource_list *unin_chip_get_resource_list (device_t, device_t); /* * OFW Bus interface */ static ofw_bus_get_devinfo_t unin_chip_get_devinfo; /* * Local routines */ static void unin_enable_gmac(device_t dev); static void unin_enable_mpic(device_t dev); /* * Driver methods. */ static device_method_t unin_chip_methods[] = { /* Device interface */ DEVMETHOD(device_probe, unin_chip_probe), DEVMETHOD(device_attach, unin_chip_attach), /* Bus interface */ DEVMETHOD(bus_print_child, unin_chip_print_child), DEVMETHOD(bus_probe_nomatch, unin_chip_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, unin_chip_get_rman), DEVMETHOD(bus_alloc_resource, unin_chip_alloc_resource), DEVMETHOD(bus_adjust_resource, unin_chip_adjust_resource), DEVMETHOD(bus_release_resource, unin_chip_release_resource), DEVMETHOD(bus_activate_resource, unin_chip_activate_resource), DEVMETHOD(bus_deactivate_resource, unin_chip_deactivate_resource), DEVMETHOD(bus_map_resource, unin_chip_map_resource), DEVMETHOD(bus_unmap_resource, unin_chip_unmap_resource), DEVMETHOD(bus_get_resource_list, unin_chip_get_resource_list), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, unin_chip_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t unin_chip_driver = { "unin", unin_chip_methods, sizeof(struct unin_chip_softc) }; /* * Assume there is only one unin chip in a PowerMac, so that pmu.c functions can * suspend the chip after the whole rest of the device tree is suspended, not * earlier. 
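 * The device is remembered in the unin_chip static below on first
 * attach, and unin_chip_wake()/unin_chip_sleep() fall back to it when
 * called with dev == NULL.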
*/ static device_t unin_chip; EARLY_DRIVER_MODULE(unin, ofwbus, unin_chip_driver, 0, 0, BUS_PASS_BUS); /* * Add an interrupt to the dev's resource list if present */ static void unin_chip_add_intr(phandle_t devnode, struct unin_chip_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->udi_ninterrupts >= 6) { printf("unin: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc_multi(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc_multi(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_searchprop(iparent, "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, irq, irq, 1); if (icells > 1) { powerpc_config_intr(irq, (intr[i+1] & 1) ? INTR_TRIGGER_LEVEL : INTR_TRIGGER_EDGE, INTR_POLARITY_LOW); } dinfo->udi_interrupts[dinfo->udi_ninterrupts] = irq; dinfo->udi_ninterrupts++; } } static void unin_chip_add_reg(phandle_t devnode, struct unin_chip_devinfo *dinfo) { struct unin_chip_reg *reg; int i, nreg; nreg = OF_getprop_alloc_multi(devnode, "reg", sizeof(*reg), (void **)®); if (nreg == -1) return; for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->udi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } } static void unin_update_reg(device_t dev, uint32_t regoff, uint32_t set, uint32_t clr) { volatile u_int *reg; struct unin_chip_softc *sc; u_int32_t tmpl; sc = device_get_softc(dev); reg = (void *)(sc->sc_addr + regoff); tmpl = inl(reg); tmpl &= ~clr; tmpl |= set; outl(reg, tmpl); } static void unin_enable_gmac(device_t dev) { unin_update_reg(dev, UNIN_CLOCKCNTL, UNIN_CLOCKCNTL_GMAC, 0); } static void unin_enable_mpic(device_t dev) { unin_update_reg(dev, UNIN_TOGGLE_REG, UNIN_MPIC_RESET | UNIN_MPIC_OUTPUT_ENABLE, 0); } static int unin_chip_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL) return (ENXIO); if (strcmp(name, "uni-n") != 0 && strcmp(name, "u3") != 0 && strcmp(name, "u4") != 0) return (ENXIO); device_set_desc(dev, "Apple UniNorth System Controller"); return (0); } static int unin_chip_attach(device_t dev) { struct unin_chip_softc *sc; struct unin_chip_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t iparent; device_t cdev; cell_t acells, scells; char compat[32]; char name[32]; u_int irq, reg[3]; int error, i = 0; sc = device_get_softc(dev); root = ofw_bus_get_node(dev); if (OF_getprop(root, "reg", reg, sizeof(reg)) < 8) return (ENXIO); acells = scells = 1; OF_getprop(OF_parent(root), "#address-cells", &acells, sizeof(acells)); OF_getprop(OF_parent(root), "#size-cells", &scells, sizeof(scells)); i = 0; sc->sc_physaddr = reg[i++]; if (acells == 2) { sc->sc_physaddr <<= 32; sc->sc_physaddr |= reg[i++]; } sc->sc_size = reg[i++]; if (scells == 2) { sc->sc_size <<= 32; sc->sc_size |= reg[i++]; } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "UniNorth Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. 
error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, sc->sc_physaddr, sc->sc_physaddr + sc->sc_size - 1); if (error) { device_printf(dev, "rman_manage_region() failed. error = %d\n", error); return (error); } if (unin_chip == NULL) unin_chip = dev; /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_UNIN, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->udi_obdinfo, child) != 0) { free(dinfo, M_UNIN); continue; } resource_list_init(&dinfo->udi_resources); dinfo->udi_ninterrupts = 0; unin_chip_add_intr(child, dinfo); /* * Some Apple machines do have a bug in OF, they miss * the interrupt entries on the U3 I2C node. That means they * do not have an entry with number of interrupts nor the * entry of the interrupt parent handle. * We define an interrupt and hardwire it to the /u3/mpic * handle. */ if (OF_getprop(child, "name", name, sizeof(name)) <= 0) device_printf(dev, "device has no name!\n"); if (dinfo->udi_ninterrupts == 0 && (strcmp(name, "i2c-bus") == 0 || strcmp(name, "i2c") == 0)) { if (OF_getprop(child, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) { iparent = OF_finddevice("/u3/mpic"); device_printf(dev, "Set /u3/mpic as iparent!\n"); } /* Add an interrupt number 0 to the parent. */ irq = MAP_IRQ(iparent, 0); resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, irq, irq, 1); dinfo->udi_interrupts[dinfo->udi_ninterrupts] = irq; dinfo->udi_ninterrupts++; } unin_chip_add_reg(child, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->udi_obdinfo.obd_name); resource_list_free(&dinfo->udi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->udi_obdinfo); free(dinfo, M_UNIN); continue; } device_set_ivars(cdev, dinfo); } /* * Only map the first page, since that is where the registers * of interest lie. */ sc->sc_addr = (vm_offset_t)pmap_mapdev(sc->sc_physaddr, PAGE_SIZE); sc->sc_version = *(u_int *)sc->sc_addr; device_printf(dev, "Version %d\n", sc->sc_version); /* * Enable the GMAC Ethernet cell and the integrated OpenPIC * if Open Firmware says they are used. */ for (child = OF_child(root); child; child = OF_peer(child)) { memset(compat, 0, sizeof(compat)); OF_getprop(child, "compatible", compat, sizeof(compat)); if (strcmp(compat, "gmac") == 0) unin_enable_gmac(dev); if (strcmp(compat, "chrp,open-pic") == 0) unin_enable_mpic(dev); } /* * GMAC lives under the PCI bus, so just check if enet is gmac. 
*/ child = OF_finddevice("enet"); memset(compat, 0, sizeof(compat)); OF_getprop(child, "compatible", compat, sizeof(compat)); if (strcmp(compat, "gmac") == 0) unin_enable_gmac(dev); return (bus_generic_attach(dev)); } static int unin_chip_print_child(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->udi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void unin_chip_probe_nomatch(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->udi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct rman * unin_chip_get_rman(device_t bus, int type, u_int flags) { struct unin_chip_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * unin_chip_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t adjstart, adjend, adjcount; struct unin_chip_devinfo *dinfo; struct resource_list_entry *rle; dinfo = device_get_ivars(child); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->udi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } rle->end = rle->end - 1; /* Hack? */ if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; return (bus_generic_rman_alloc_resource(bus, child, SYS_RES_MEMORY, rid, adjstart, adjend, adjcount, flags)); case SYS_RES_IRQ: /* Check for passthrough from subattachments. 
*/ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->udi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->udi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, start, start, 1); dinfo->udi_interrupts[dinfo->udi_ninterrupts] = start; dinfo->udi_ninterrupts++; } return (resource_list_alloc(&dinfo->udi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } } static int -unin_chip_adjust_resource(device_t bus, device_t child, int type, - struct resource *r, rman_res_t start, rman_res_t end) +unin_chip_adjust_resource(device_t bus, device_t child, struct resource *r, + rman_res_t start, rman_res_t end) { - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_adjust_resource(bus, child, type, r, - start, end)); - case SYS_RES_IRQ: - return (bus_generic_adjust_resource(bus, child, type, r, start, + return (bus_generic_rman_adjust_resource(bus, child, r, start, end)); + case SYS_RES_IRQ: + return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int unin_chip_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_release_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_rl_release_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int unin_chip_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_activate_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int unin_chip_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_deactivate_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int unin_chip_map_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. 
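 * A successful map yields a bus tag/handle pair in the resource_map;
 * a caller-side sketch (hypothetical resource 'r', error handling
 * elided):
 *
 *	struct resource_map map;
 *	uint32_t val;
 *
 *	bus_map_resource(dev, SYS_RES_MEMORY, r, NULL, &map);
 *	val = bus_space_read_4(map.r_bustag, map.r_bushandle, 0);
 *	bus_unmap_resource(dev, SYS_RES_MEMORY, r, &map);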
*/ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); if (bootverbose) printf("nexus mapdev: start %jx, len %jd\n", (uintmax_t)start, (uintmax_t)length); map->r_vaddr = pmap_mapdev_attr(start, length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_be_tag; map->r_size = length; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int unin_chip_unmap_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map *map) { /* * If this is a memory resource, unmap it. */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); break; default: return (EINVAL); } return (0); } static struct resource_list * unin_chip_get_resource_list (device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->udi_resources); } static const struct ofw_bus_devinfo * unin_chip_get_devinfo(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->udi_obdinfo); } int unin_chip_wake(device_t dev) { if (dev == NULL) dev = unin_chip; unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_NORMAL, UNIN_PWR_MASK); DELAY(10); unin_update_reg(dev, UNIN_HWINIT_STATE, UNIN_RUNNING, 0); DELAY(100); return (0); } int unin_chip_sleep(device_t dev, int idle) { if (dev == NULL) dev = unin_chip; unin_update_reg(dev, UNIN_HWINIT_STATE, UNIN_SLEEPING, 0); DELAY(10); if (idle) unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_IDLE2, UNIN_PWR_MASK); else unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_SLEEP, UNIN_PWR_MASK); DELAY(10); return (0); } diff --git a/sys/powerpc/psim/iobus.c b/sys/powerpc/psim/iobus.c index eea0255aa21c..79befbc8bd86 100644 --- a/sys/powerpc/psim/iobus.c +++ b/sys/powerpc/psim/iobus.c @@ -1,464 +1,463 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * PSIM 'iobus' local bus. 
Should be set up in the device tree like: * * /iobus@0x80000000/name psim-iobus * * Code borrowed from various nexus.c and uninorth.c :-) */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct iobus_softc { phandle_t sc_node; vm_offset_t sc_addr; vm_offset_t sc_size; struct rman sc_mem_rman; }; static MALLOC_DEFINE(M_IOBUS, "iobus", "iobus device information"); static int iobus_probe(device_t); static int iobus_attach(device_t); static int iobus_print_child(device_t dev, device_t child); static void iobus_probe_nomatch(device_t, device_t); static int iobus_read_ivar(device_t, device_t, int, uintptr_t *); static int iobus_write_ivar(device_t, device_t, int, uintptr_t); static struct rman *iobus_get_rman(device_t, int, u_int); static struct resource *iobus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int iobus_adjust_resource(device_t, device_t, int, struct resource *, +static int iobus_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int iobus_activate_resource(device_t, device_t, int, int, struct resource *); static int iobus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int iobus_map_resource(device_t, device_t, int, struct resource *, struct resource_map_request *, struct resource_map *); static int iobus_unmap_resource(device_t, device_t, int, struct resource *, struct resource_map *); static int iobus_release_resource(device_t, device_t, int, int, struct resource *); /* * Bus interface definition */ static device_method_t iobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iobus_probe), DEVMETHOD(device_attach, iobus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, iobus_print_child), DEVMETHOD(bus_probe_nomatch, iobus_probe_nomatch), DEVMETHOD(bus_read_ivar, iobus_read_ivar), DEVMETHOD(bus_write_ivar, iobus_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, iobus_get_rman), DEVMETHOD(bus_alloc_resource, iobus_alloc_resource), DEVMETHOD(bus_adjust_resource, iobus_adjust_resource), DEVMETHOD(bus_release_resource, iobus_release_resource), DEVMETHOD(bus_activate_resource, iobus_activate_resource), DEVMETHOD(bus_deactivate_resource, iobus_deactivate_resource), DEVMETHOD(bus_map_resource, iobus_map_resource), DEVMETHOD(bus_unmap_resource, iobus_unmap_resource), { 0, 0 } }; static driver_t iobus_driver = { "iobus", iobus_methods, sizeof(struct iobus_softc) }; DRIVER_MODULE(iobus, ofwbus, iobus_driver, 0, 0); static int iobus_probe(device_t dev) { const char *type = ofw_bus_get_name(dev); if (strcmp(type, "psim-iobus") != 0) return (ENXIO); device_set_desc(dev, "PSIM local bus"); return (0); } /* * Add interrupt/addr range to the dev's resource list if present */ static void iobus_add_intr(phandle_t devnode, struct iobus_devinfo *dinfo) { u_int intr = -1; if (OF_getprop(devnode, "interrupt", &intr, sizeof(intr)) != -1) { resource_list_add(&dinfo->id_resources, SYS_RES_IRQ, 0, intr, intr, 1); } dinfo->id_interrupt = intr; } static void iobus_add_reg(phandle_t devnode, struct iobus_devinfo *dinfo, vm_offset_t iobus_off) { u_int size; int i; size = OF_getprop(devnode, "reg", 
dinfo->id_reg,sizeof(dinfo->id_reg)); if (size != -1) { dinfo->id_nregs = size / (sizeof(dinfo->id_reg[0])); for (i = 0; i < dinfo->id_nregs; i+= 3) { /* * Scale the absolute addresses back to iobus * relative offsets. This is to better simulate * macio */ dinfo->id_reg[i+1] -= iobus_off; resource_list_add(&dinfo->id_resources, SYS_RES_MEMORY, 0, dinfo->id_reg[i+1], dinfo->id_reg[i+1] + dinfo->id_reg[i+2], dinfo->id_reg[i+2]); } } } static int iobus_attach(device_t dev) { struct iobus_softc *sc; struct iobus_devinfo *dinfo; phandle_t root; phandle_t child; device_t cdev; char *name; u_int reg[2]; int size; sc = device_get_softc(dev); sc->sc_node = ofw_bus_get_node(dev); /* * Find the base addr/size of the iobus, and initialize the * resource manager */ size = OF_getprop(sc->sc_node, "reg", reg, sizeof(reg)); if (size == sizeof(reg)) { sc->sc_addr = reg[0]; sc->sc_size = reg[1]; } else { return (ENXIO); } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "IOBus Device Memory"; if (rman_init(&sc->sc_mem_rman) != 0) { device_printf(dev, "failed to init mem range resources\n"); return (ENXIO); } rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); /* * Iterate through the sub-devices */ root = sc->sc_node; for (child = OF_child(root); child != 0; child = OF_peer(child)) { OF_getprop_alloc(child, "name", (void **)&name); cdev = device_add_child(dev, NULL, -1); if (cdev != NULL) { dinfo = malloc(sizeof(*dinfo), M_IOBUS, M_WAITOK); memset(dinfo, 0, sizeof(*dinfo)); resource_list_init(&dinfo->id_resources); dinfo->id_node = child; dinfo->id_name = name; iobus_add_intr(child, dinfo); iobus_add_reg(child, dinfo, sc->sc_addr); device_set_ivars(cdev, dinfo); } else { OF_prop_free(name); } } return (bus_generic_attach(dev)); } static int iobus_print_child(device_t dev, device_t child) { struct iobus_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->id_resources; retval += bus_print_child_header(dev, child); retval += printf(" offset 0x%x", dinfo->id_reg[1]); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void iobus_probe_nomatch(device_t dev, device_t child) { } static int iobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct iobus_devinfo *dinfo; if ((dinfo = device_get_ivars(child)) == NULL) return (ENOENT); switch (which) { case IOBUS_IVAR_NODE: *result = dinfo->id_node; break; case IOBUS_IVAR_NAME: *result = (uintptr_t)dinfo->id_name; break; case IOBUS_IVAR_NREGS: *result = dinfo->id_nregs; break; case IOBUS_IVAR_REGS: *result = (uintptr_t)dinfo->id_reg; break; default: return (ENOENT); } return (0); } static int iobus_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (EINVAL); } static struct rman * iobus_get_rman(device_t bus, int type, u_int flags) { struct iobus_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * iobus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); case SYS_RES_IRQ: return (bus_alloc_resource(bus, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", 
device_get_nameunit(child)); return (NULL); } } static int -iobus_adjust_resource(device_t bus, device_t child, int type, - struct resource *r, rman_res_t start, rman_res_t end) +iobus_adjust_resource(device_t bus, device_t child, struct resource *r, + rman_res_t start, rman_res_t end) { - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_adjust_resource(bus, child, type, r, - start, end)); - case SYS_RES_IRQ: - return (bus_generic_adjust_resource(bus, child, type, r, start, + return (bus_generic_rman_adjust_resource(bus, child, r, start, end)); + case SYS_RES_IRQ: + return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int iobus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_release_resource(bus, child, type, rid, res)); case SYS_RES_IRQ: return (bus_generic_release_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int iobus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IRQ: return (bus_generic_activate_resource(bus, child, type, rid, res)); case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int iobus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { switch (type) { case SYS_RES_IRQ: return (bus_generic_deactivate_resource(bus, child, type, rid, res)); case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(bus, child, type, rid, res)); default: return (EINVAL); } } static int iobus_map_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct iobus_softc *sc; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sc = device_get_softc(bus); map->r_vaddr = pmap_mapdev_attr((vm_paddr_t)start + sc->sc_addr, (vm_size_t)length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_le_tag; map->r_bushandle = (vm_offset_t)map->r_vaddr; map->r_size = length; return (0); } static int iobus_unmap_resource(device_t bus, device_t child, int type, struct resource *r, struct resource_map *map) { switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); return (0); default: return (EINVAL); } } diff --git a/sys/sys/bus.h b/sys/sys/bus.h index 6fcd414dc7be..310b3646f49f 100644 --- a/sys/sys/bus.h +++ b/sys/sys/bus.h @@ -1,1083 +1,1083 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_BUS_H_ #define _SYS_BUS_H_ #include #include #include #include /** * @defgroup NEWBUS newbus - a generic framework for managing devices * @{ */ /** * @brief Interface information structure. */ struct u_businfo { int ub_version; /**< @brief interface version */ #define BUS_USER_VERSION 2 int ub_generation; /**< @brief generation count */ }; /** * @brief State of the device. */ typedef enum device_state { DS_NOTPRESENT = 10, /**< @brief not probed or probe failed */ DS_ALIVE = 20, /**< @brief probe succeeded */ DS_ATTACHING = 25, /**< @brief currently attaching */ DS_ATTACHED = 30, /**< @brief attach method called */ } device_state_t; /** * @brief Device property types. * * These are used by bus logic to encode requested properties, * e.g. in DT all properties are stored as BE and need to be converted * to host endianness. */ typedef enum device_property_type { DEVICE_PROP_ANY = 0, DEVICE_PROP_BUFFER = 1, DEVICE_PROP_UINT32 = 2, DEVICE_PROP_UINT64 = 3, DEVICE_PROP_HANDLE = 4, } device_property_type_t; /** * @brief Device information exported to userspace. * The strings are placed one after the other, separated by NUL characters. * Fields should be added after the last one and order maintained for compatibility. */ #define BUS_USER_BUFFER (3*1024) struct u_device { uintptr_t dv_handle; uintptr_t dv_parent; uint32_t dv_devflags; /**< @brief API Flags for device */ uint16_t dv_flags; /**< @brief flags for dev state */ device_state_t dv_state; /**< @brief State of attachment */ char dv_fields[BUS_USER_BUFFER]; /**< @brief NUL terminated fields */ /* name (name of the device in tree) */ /* desc (driver description) */ /* drivername (Name of driver without unit number) */ /* pnpinfo (Plug and play information from bus) */ /* location (Location of device on parent) */ /* NUL */ }; /* Flags exported via dv_flags. */ #define DF_ENABLED 0x01 /* device should be probed/attached */ #define DF_FIXEDCLASS 0x02 /* devclass specified at create time */ #define DF_WILDCARD 0x04 /* unit was originally wildcard */ #define DF_DESCMALLOCED 0x08 /* description was malloced */ #define DF_QUIET 0x10 /* don't print verbose attach message */ #define DF_DONENOMATCH 0x20 /* don't execute DEVICE_NOMATCH again */ #define DF_EXTERNALSOFTC 0x40 /* softc not allocated by us */ #define DF_SUSPENDED 0x100 /* Device is suspended.
*/ #define DF_QUIET_CHILDREN 0x200 /* Default to quiet for all my children */ #define DF_ATTACHED_ONCE 0x400 /* Has been attached at least once */ #define DF_NEEDNOMATCH 0x800 /* Has a pending NOMATCH event */ /** * @brief Device request structure used for ioctl's. * * Used for ioctl's on /dev/devctl2. All device ioctl's * must have parameter definitions which begin with dr_name. */ struct devreq_buffer { void *buffer; size_t length; }; struct devreq { char dr_name[128]; int dr_flags; /* request-specific flags */ union { struct devreq_buffer dru_buffer; void *dru_data; } dr_dru; #define dr_buffer dr_dru.dru_buffer /* variable-sized buffer */ #define dr_data dr_dru.dru_data /* fixed-size buffer */ }; #define DEV_ATTACH _IOW('D', 1, struct devreq) #define DEV_DETACH _IOW('D', 2, struct devreq) #define DEV_ENABLE _IOW('D', 3, struct devreq) #define DEV_DISABLE _IOW('D', 4, struct devreq) #define DEV_SUSPEND _IOW('D', 5, struct devreq) #define DEV_RESUME _IOW('D', 6, struct devreq) #define DEV_SET_DRIVER _IOW('D', 7, struct devreq) #define DEV_CLEAR_DRIVER _IOW('D', 8, struct devreq) #define DEV_RESCAN _IOW('D', 9, struct devreq) #define DEV_DELETE _IOW('D', 10, struct devreq) #define DEV_FREEZE _IOW('D', 11, struct devreq) #define DEV_THAW _IOW('D', 12, struct devreq) #define DEV_RESET _IOW('D', 13, struct devreq) #define DEV_GET_PATH _IOWR('D', 14, struct devreq) /* Flags for DEV_DETACH and DEV_DISABLE. */ #define DEVF_FORCE_DETACH 0x0000001 /* Flags for DEV_SET_DRIVER. */ #define DEVF_SET_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_CLEAR_DRIVER. */ #define DEVF_CLEAR_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_DELETE. */ #define DEVF_FORCE_DELETE 0x0000001 /* Flags for DEV_RESET */ #define DEVF_RESET_DETACH 0x0000001 /* Detach drivers vs suspend device */ #ifdef _KERNEL #include #include #include #include /** * Device name parsers. Hook to allow device enumerators to map * scheme-specific names to a device. */ typedef void (*dev_lookup_fn)(void *arg, const char *name, device_t *result); EVENTHANDLER_DECLARE(dev_lookup, dev_lookup_fn); /** * @brief A device driver. * * Provides an abstraction layer for driver dispatch. */ typedef struct kobj_class driver_t; /** * @brief A device class * * The devclass object has two main functions in the system. The first * is to manage the allocation of unit numbers for device instances * and the second is to hold the list of device drivers for a * particular bus type. Each devclass has a name and there cannot be * two devclasses with the same name. This ensures that unique unit * numbers are allocated to device instances. * * Drivers that support several different bus attachments (e.g. isa, * pci, pccard) should all use the same devclass to ensure that unit * numbers do not conflict. * * Each devclass may also have a parent devclass. This is used when * searching for device drivers to allow a form of inheritance. When * matching drivers with devices, first the driver list of the parent * device's devclass is searched. If no driver is found in that list, * the search continues in the parent devclass (if any). 
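 *
 * As a hedged sketch (driver and devclass names hypothetical), a
 * driver with both pci and isa attachments would register each
 * attachment under the same "foo" devclass, so foo0, foo1, ... are
 * allocated from a single unit namespace:
 *
 *	DRIVER_MODULE(foo, pci, foo_pci_driver, 0, 0);
 *	DRIVER_MODULE(foo, isa, foo_isa_driver, 0, 0);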
*/ typedef struct devclass *devclass_t; /** * @brief A device method */ #define device_method_t kobj_method_t /** * @brief Driver interrupt filter return values * * If a driver provides an interrupt filter routine it must return an * integer formed by ORing together zero or more of the following * flags: * * FILTER_STRAY - this device did not trigger the interrupt * FILTER_HANDLED - the interrupt has been fully handled and can be EOId * FILTER_SCHEDULE_THREAD - the threaded interrupt handler should be * scheduled to execute * * If the driver does not provide a filter, then the interrupt code will * act as if the filter had returned FILTER_SCHEDULE_THREAD. Note that it * is illegal to specify any other flag with FILTER_STRAY and that it is * illegal to not specify either of FILTER_HANDLED or FILTER_SCHEDULE_THREAD * if FILTER_STRAY is not specified. */ #define FILTER_STRAY 0x01 #define FILTER_HANDLED 0x02 #define FILTER_SCHEDULE_THREAD 0x04 /** * @brief Driver interrupt service routines * * The filter routine is run in primary interrupt context and may not * block or use regular mutexes. It may only use spin mutexes for * synchronization. The filter may either completely handle the * interrupt or it may perform some of the work and defer more * expensive work to the regular interrupt handler. If a filter * routine is not registered by the driver, then the regular interrupt * handler is always used to handle interrupts from this device. * * The regular interrupt handler executes in its own thread context * and may use regular mutexes. However, it is prohibited from * sleeping on a sleep queue. */ typedef int driver_filter_t(void*); typedef void driver_intr_t(void*); /** * @brief Interrupt type bits. * * These flags may be passed by drivers to bus_setup_intr(9) when * registering a new interrupt handler. The field is overloaded to * specify both the interrupt's type and any special properties. * * The INTR_TYPE* bits will be passed to intr_priority(9) to determine * the scheduling priority of the handler's ithread. Historically, each * type was assigned a unique scheduling preference, but now only * INTR_TYPE_CLK receives a default priority higher than other * interrupts. See sys/priority.h. * * Buses may choose to modify or augment these flags as appropriate, * e.g. nexus may apply INTR_EXCL. */ enum intr_type { INTR_TYPE_TTY = 1, INTR_TYPE_BIO = 2, INTR_TYPE_NET = 4, INTR_TYPE_CAM = 8, INTR_TYPE_MISC = 16, INTR_TYPE_CLK = 32, INTR_TYPE_AV = 64, INTR_EXCL = 256, /* exclusive interrupt */ INTR_MPSAFE = 512, /* this interrupt is SMP safe */ INTR_ENTROPY = 1024, /* this interrupt provides entropy */ INTR_MD1 = 4096, /* flag reserved for MD use */ INTR_MD2 = 8192, /* flag reserved for MD use */ INTR_MD3 = 16384, /* flag reserved for MD use */ INTR_MD4 = 32768 /* flag reserved for MD use */ }; enum intr_trigger { INTR_TRIGGER_INVALID = -1, INTR_TRIGGER_CONFORM = 0, INTR_TRIGGER_EDGE = 1, INTR_TRIGGER_LEVEL = 2 }; enum intr_polarity { INTR_POLARITY_CONFORM = 0, INTR_POLARITY_HIGH = 1, INTR_POLARITY_LOW = 2 }; /** * CPU sets supported by bus_get_cpus(). Note that not all sets may be * supported for a given device. If a request is not supported by a * device (or its parents), then bus_get_cpus() will fail with EINVAL. */ enum cpu_sets { LOCAL_CPUS = 0, INTR_CPUS }; typedef int (*devop_t)(void); /** * @brief This structure is deprecated. * * Use the kobj(9) macro DEFINE_CLASS to * declare classes which implement device drivers.
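 *
 * New code should not fill in a struct driver by hand; for instance,
 * the legacy host-PCI bridge driver later in this change declares its
 * class with DEFINE_CLASS_0(pcib, legacy_pcib_driver,
 * legacy_pcib_methods, 1).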
*/ struct driver { KOBJ_CLASS_FIELDS; }; struct resource; /** * @brief A resource mapping. */ struct resource_map { bus_space_tag_t r_bustag; bus_space_handle_t r_bushandle; bus_size_t r_size; void *r_vaddr; }; /** * @brief Optional properties of a resource mapping request. */ struct resource_map_request { size_t size; rman_res_t offset; rman_res_t length; vm_memattr_t memattr; }; void resource_init_map_request_impl(struct resource_map_request *_args, size_t _sz); #define resource_init_map_request(rmr) \ resource_init_map_request_impl((rmr), sizeof(*(rmr))) int resource_validate_map_request(struct resource *r, struct resource_map_request *in, struct resource_map_request *out, rman_res_t *startp, rman_res_t *lengthp); /* * Definitions for drivers which need to keep simple lists of resources * for their child devices. */ /** * @brief An entry for a single resource in a resource list. */ struct resource_list_entry { STAILQ_ENTRY(resource_list_entry) link; int type; /**< @brief type argument to alloc_resource */ int rid; /**< @brief resource identifier */ int flags; /**< @brief resource flags */ struct resource *res; /**< @brief the real resource when allocated */ rman_res_t start; /**< @brief start of resource range */ rman_res_t end; /**< @brief end of resource range */ rman_res_t count; /**< @brief count within range */ }; STAILQ_HEAD(resource_list, resource_list_entry); #define RLE_RESERVED 0x0001 /* Reserved by the parent bus. */ #define RLE_ALLOCATED 0x0002 /* Reserved resource is allocated. */ #define RLE_PREFETCH 0x0004 /* Resource is a prefetch range. */ void resource_list_init(struct resource_list *rl); void resource_list_free(struct resource_list *rl); struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count); int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count); int resource_list_busy(struct resource_list *rl, int type, int rid); int resource_list_reserved(struct resource_list *rl, int type, int rid); struct resource_list_entry* resource_list_find(struct resource_list *rl, int type, int rid); void resource_list_delete(struct resource_list *rl, int type, int rid); struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int resource_list_release(struct resource_list *rl, device_t bus, device_t child, int type, int rid, struct resource *res); int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type); struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid); void resource_list_purge(struct resource_list *rl); int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format); /* * The root bus, to which all top-level buses are attached. */ extern device_t root_bus; extern devclass_t root_devclass; void root_bus_configure(void); /* * Useful functions for implementing buses. 
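 *
 * Note that after this change the adjust_resource helpers below no
 * longer take a type argument; the type is recovered from the resource
 * itself via rman_get_type().  A minimal sketch of a bus method built
 * on these helpers (driver name hypothetical, mirroring the iobus
 * change above):
 *
 *	static int
 *	foo_adjust_resource(device_t bus, device_t child,
 *	    struct resource *r, rman_res_t start, rman_res_t end)
 *	{
 *		switch (rman_get_type(r)) {
 *		case SYS_RES_MEMORY:
 *		case SYS_RES_IOPORT:
 *			return (bus_generic_rman_adjust_resource(bus,
 *			    child, r, start, end));
 *		default:
 *			return (bus_generic_adjust_resource(bus, child,
 *			    r, start, end));
 *		}
 *	}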
*/ struct _cpuset; int bus_generic_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit); -int bus_generic_adjust_resource(device_t bus, device_t child, int type, +int bus_generic_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end); struct resource * bus_generic_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_generic_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart); int bus_generic_attach(device_t dev); int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu); int bus_generic_child_location(device_t dev, device_t child, struct sbuf *sb); int bus_generic_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb); int bus_generic_child_present(device_t dev, device_t child); int bus_generic_config_intr(device_t, int, enum intr_trigger, enum intr_polarity); int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); int bus_generic_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int bus_generic_detach(device_t dev); void bus_generic_driver_added(device_t dev, driver_t *driver); int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, struct _cpuset *cpuset); bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child); bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child); int bus_generic_get_domain(device_t dev, device_t child, int *domain); ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); struct resource_list * bus_generic_get_resource_list(device_t, device_t); int bus_generic_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map); void bus_generic_new_pass(device_t dev); int bus_print_child_header(device_t dev, device_t child); int bus_print_child_domain(device_t dev, device_t child); int bus_print_child_footer(device_t dev, device_t child); int bus_generic_print_child(device_t dev, device_t child); int bus_generic_probe(device_t dev); int bus_generic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int bus_generic_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); int bus_generic_resume(device_t dev); int bus_generic_resume_child(device_t dev, device_t child); int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep); struct resource * bus_generic_rl_alloc_resource (device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); void bus_generic_rl_delete_resource (device_t, device_t, int, int); int bus_generic_rl_get_resource (device_t, device_t, int, int, rman_res_t *, rman_res_t *); int bus_generic_rl_set_resource (device_t, device_t, int, int, rman_res_t, rman_res_t); int bus_generic_rl_release_resource (device_t, device_t, int, int, struct resource *); struct resource * bus_generic_rman_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -int bus_generic_rman_adjust_resource(device_t dev, 
device_t child, int type, +int bus_generic_rman_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); int bus_generic_rman_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int bus_generic_rman_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int bus_generic_rman_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int bus_generic_shutdown(device_t dev); int bus_generic_suspend(device_t dev); int bus_generic_suspend_child(device_t dev, device_t child); int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq); int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq); int bus_generic_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map); int bus_generic_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int bus_generic_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb); int bus_helper_reset_post(device_t dev, int flags); int bus_helper_reset_prepare(device_t dev, int flags); int bus_null_rescan(device_t dev); /* * Wrapper functions for the BUS_*_RESOURCE methods to make client code * a little simpler. */ struct resource_spec { int type; int rid; int flags; }; #define RESOURCE_SPEC_END {-1, 0, 0} int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res); void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res); -int bus_adjust_resource(device_t child, int type, struct resource *r, +int bus_adjust_resource(device_t child, struct resource *r, rman_res_t start, rman_res_t end); int bus_translate_resource(device_t child, int type, rman_res_t start, rman_res_t *newstart); struct resource *bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_activate_resource(device_t dev, int type, int rid, struct resource *r); int bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r); int bus_map_resource(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map); int bus_unmap_resource(device_t dev, int type, struct resource *r, struct resource_map *map); int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, struct _cpuset *cpuset); bus_dma_tag_t bus_get_dma_tag(device_t dev); bus_space_tag_t bus_get_bus_tag(device_t dev); int bus_get_domain(device_t dev, int *domain); int bus_release_resource(device_t dev, int type, int rid, struct resource *r); int bus_free_resource(device_t dev, int type, struct resource *r); int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep); int bus_teardown_intr(device_t dev, struct resource *r, void *cookie); int bus_suspend_intr(device_t dev, struct resource *r); int bus_resume_intr(device_t dev, struct resource *r); int bus_bind_intr(device_t dev, struct resource *r, int cpu); int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) 
__printflike(4, 5); int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count); int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp); rman_res_t bus_get_resource_start(device_t dev, int type, int rid); rman_res_t bus_get_resource_count(device_t dev, int type, int rid); void bus_delete_resource(device_t dev, int type, int rid); int bus_child_present(device_t child); int bus_child_pnpinfo(device_t child, struct sbuf *sb); int bus_child_location(device_t child, struct sbuf *sb); void bus_enumerate_hinted_children(device_t bus); int bus_delayed_attach_children(device_t bus); static __inline struct resource * bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags) { return (bus_alloc_resource(dev, type, rid, 0, ~0, 1, flags)); } static __inline struct resource * bus_alloc_resource_anywhere(device_t dev, int type, int *rid, rman_res_t count, u_int flags) { return (bus_alloc_resource(dev, type, rid, 0, ~0, count, flags)); } /* Compat shims for simpler bus resource API. */ -int bus_adjust_resource_new(device_t child, struct resource *r, +int bus_adjust_resource_old(device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end); int bus_activate_resource_new(device_t dev, struct resource *r); int bus_deactivate_resource_new(device_t dev, struct resource *r); int bus_map_resource_new(device_t dev, struct resource *r, struct resource_map_request *args, struct resource_map *map); int bus_unmap_resource_new(device_t dev, struct resource *r, struct resource_map *map); int bus_release_resource_new(device_t dev, struct resource *r); #define _BUS_API_MACRO(_1, _2, _3, _4, _5, NAME, ...) NAME #define bus_adjust_resource(...) \ - _BUS_API_MACRO(__VA_ARGS__, bus_adjust_resource, \ - bus_adjust_resource_new)(__VA_ARGS__) + _BUS_API_MACRO(__VA_ARGS__, bus_adjust_resource_old, \ + bus_adjust_resource)(__VA_ARGS__) #define bus_activate_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_activate_resource, \ INVALID, bus_activate_resource_new)(__VA_ARGS__) #define bus_deactivate_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_deactivate_resource, \ INVALID, bus_deactivate_resource_new)(__VA_ARGS__) #define bus_map_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, bus_map_resource, \ bus_map_resource_new)(__VA_ARGS__) #define bus_unmap_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_unmap_resource, \ bus_unmap_resource_new)(__VA_ARGS__) #define bus_release_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_release_resource, \ INVALID, bus_release_resource_new)(__VA_ARGS__) /* * Access functions for device. 
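 * (A note on the compatibility shims above: _BUS_API_MACRO dispatches
 * on argument count, so during the transition a four-argument call
 * such as the hedged example
 *
 *	error = bus_adjust_resource(dev, r, start, end);
 *
 * expands to the new bus_adjust_resource(), while a legacy
 * five-argument call that still passes an explicit type expands to
 * bus_adjust_resource_old().)
 *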
*/ device_t device_add_child(device_t dev, const char *name, int unit); device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit); void device_busy(device_t dev); int device_delete_child(device_t dev, device_t child); int device_delete_children(device_t dev); int device_attach(device_t dev); int device_detach(device_t dev); void device_disable(device_t dev); void device_enable(device_t dev); device_t device_find_child(device_t dev, const char *classname, int unit); const char *device_get_desc(device_t dev); devclass_t device_get_devclass(device_t dev); driver_t *device_get_driver(device_t dev); u_int32_t device_get_flags(device_t dev); device_t device_get_parent(device_t dev); int device_get_children(device_t dev, device_t **listp, int *countp); void *device_get_ivars(device_t dev); void device_set_ivars(device_t dev, void *ivars); const char *device_get_name(device_t dev); const char *device_get_nameunit(device_t dev); void *device_get_softc(device_t dev); device_state_t device_get_state(device_t dev); int device_get_unit(device_t dev); struct sysctl_ctx_list *device_get_sysctl_ctx(device_t dev); struct sysctl_oid *device_get_sysctl_tree(device_t dev); int device_has_quiet_children(device_t dev); int device_is_alive(device_t dev); /* did probe succeed? */ int device_is_attached(device_t dev); /* did attach succeed? */ int device_is_enabled(device_t dev); int device_is_suspended(device_t dev); int device_is_quiet(device_t dev); device_t device_lookup_by_name(const char *name); int device_print_prettyname(device_t dev); int device_printf(device_t dev, const char *, ...) __printflike(2, 3); int device_log(device_t dev, int pri, const char *, ...) __printflike(3, 4); int device_probe(device_t dev); int device_probe_and_attach(device_t dev); int device_probe_child(device_t bus, device_t dev); int device_quiesce(device_t dev); void device_quiet(device_t dev); void device_quiet_children(device_t dev); void device_set_desc(device_t dev, const char* desc); void device_set_descf(device_t dev, const char* fmt, ...) __printflike(2, 3); void device_set_desc_copy(device_t dev, const char* desc); int device_set_devclass(device_t dev, const char *classname); int device_set_devclass_fixed(device_t dev, const char *classname); bool device_is_devclass_fixed(device_t dev); int device_set_driver(device_t dev, driver_t *driver); void device_set_flags(device_t dev, u_int32_t flags); void device_set_softc(device_t dev, void *softc); void device_free_softc(void *softc); void device_claim_softc(device_t dev); int device_set_unit(device_t dev, int unit); /* XXX DONT USE XXX */ int device_shutdown(device_t dev); void device_unbusy(device_t dev); void device_verbose(device_t dev); ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type); bool device_has_property(device_t dev, const char *prop); /* * Access functions for devclass. 
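 *
 * A short sketch, mirroring the check legacy_pcib_identify() performs
 * later in this change to avoid adding a duplicate PCI bus:
 *
 *	devclass_t dc;
 *
 *	dc = devclass_find("pci");
 *	if (dc != NULL && devclass_get_device(dc, 0) != NULL)
 *		return;		/* a pci bus instance already exists */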
*/ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp); devclass_t devclass_create(const char *classname); int devclass_delete_driver(devclass_t busclass, driver_t *driver); devclass_t devclass_find(const char *classname); const char *devclass_get_name(devclass_t dc); device_t devclass_get_device(devclass_t dc, int unit); void *devclass_get_softc(devclass_t dc, int unit); int devclass_get_devices(devclass_t dc, device_t **listp, int *countp); int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp); int devclass_get_count(devclass_t dc); int devclass_get_maxunit(devclass_t dc); int devclass_find_free_unit(devclass_t dc, int unit); void devclass_set_parent(devclass_t dc, devclass_t pdc); devclass_t devclass_get_parent(devclass_t dc); struct sysctl_ctx_list *devclass_get_sysctl_ctx(devclass_t dc); struct sysctl_oid *devclass_get_sysctl_tree(devclass_t dc); /* * Access functions for device resources. */ int resource_int_value(const char *name, int unit, const char *resname, int *result); int resource_long_value(const char *name, int unit, const char *resname, long *result); int resource_string_value(const char *name, int unit, const char *resname, const char **result); int resource_disabled(const char *name, int unit); int resource_find_match(int *anchor, const char **name, int *unit, const char *resname, const char *value); int resource_find_dev(int *anchor, const char *name, int *unit, const char *resname, const char *value); int resource_unset_value(const char *name, int unit, const char *resname); /* * Functions for maintaining and checking consistency of * bus information exported to userspace. */ int bus_data_generation_check(int generation); void bus_data_generation_update(void); /** * Some convenience defines for probe routines to return. These are just * suggested values, and there's nothing magical about them. * BUS_PROBE_SPECIFIC is for devices that cannot be reprobed, and that no * possible other driver may exist (typically legacy drivers who don't follow * all the rules, or special needs drivers). BUS_PROBE_VENDOR is the * suggested value that vendor supplied drivers use. This is for source or * binary drivers that are not yet integrated into the FreeBSD tree. Its use * in the base OS is prohibited. BUS_PROBE_DEFAULT is the normal return value * for drivers to use. It is intended that nearly all of the drivers in the * tree should return this value. BUS_PROBE_LOW_PRIORITY is for drivers that * have special requirements like when there are two drivers that support * overlapping series of hardware devices. In this case the one that supports * the older part of the line would return this value, while the one that * supports the newer ones would return BUS_PROBE_DEFAULT. BUS_PROBE_GENERIC * is for drivers that wish to have a generic form and a specialized form, * as is done with the pci bus and the acpi pci bus. BUS_PROBE_HOOVER is * for those buses that implement a generic device placeholder for devices on * the bus that have no more specific driver for them (aka ugen). * BUS_PROBE_NOWILDCARD or lower means that the device isn't really bidding * for a device node, but accepts only devices that its parent has told it * to use this driver.
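 *
 * A hedged sketch of a probe routine returning one of these values
 * (the FOO_DEVID constant and description are hypothetical):
 *
 *	static int
 *	foo_probe(device_t dev)
 *	{
 *		if (pci_get_devid(dev) != FOO_DEVID)
 *			return (ENXIO);
 *		device_set_desc(dev, "Foo controller");
 *		return (BUS_PROBE_DEFAULT);
 *	}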
*/ #define BUS_PROBE_SPECIFIC 0 /* Only I can use this device */ #define BUS_PROBE_VENDOR (-10) /* Vendor supplied driver */ #define BUS_PROBE_DEFAULT (-20) /* Base OS default driver */ #define BUS_PROBE_LOW_PRIORITY (-40) /* Older, less desirable drivers */ #define BUS_PROBE_GENERIC (-100) /* generic driver for dev */ #define BUS_PROBE_HOOVER (-1000000) /* Driver for any dev on bus */ #define BUS_PROBE_NOWILDCARD (-2000000000) /* No wildcard device matches */ /** * During boot, the device tree is scanned multiple times. Each scan, * or pass, drivers may be attached to devices. Each driver * attachment is assigned a pass number. Drivers may only probe and * attach to devices if their pass number is less than or equal to the * current system-wide pass number. The default pass is the last pass * and is used by most drivers. Drivers needed by the scheduler are * probed in earlier passes. */ #define BUS_PASS_ROOT 0 /* Used to attach root0. */ #define BUS_PASS_BUS 10 /* Buses and bridges. */ #define BUS_PASS_CPU 20 /* CPU devices. */ #define BUS_PASS_RESOURCE 30 /* Resource discovery. */ #define BUS_PASS_INTERRUPT 40 /* Interrupt controllers. */ #define BUS_PASS_TIMER 50 /* Timers and clocks. */ #define BUS_PASS_SCHEDULER 60 /* Start scheduler. */ #define BUS_PASS_SUPPORTDEV 100000 /* Drivers which support DEFAULT drivers. */ #define BUS_PASS_DEFAULT __INT_MAX /* Everything else. */ #define BUS_PASS_ORDER_FIRST 0 #define BUS_PASS_ORDER_EARLY 2 #define BUS_PASS_ORDER_MIDDLE 5 #define BUS_PASS_ORDER_LATE 7 #define BUS_PASS_ORDER_LAST 9 #define BUS_LOCATOR_ACPI "ACPI" #define BUS_LOCATOR_FREEBSD "FreeBSD" #define BUS_LOCATOR_UEFI "UEFI" #define BUS_LOCATOR_OFW "OFW" extern int bus_current_pass; void bus_set_pass(int pass); /** * Routines to lock / unlock the newbus lock. * Must be taken out to interact with newbus. */ void bus_topo_lock(void); void bus_topo_unlock(void); struct mtx * bus_topo_mtx(void); void bus_topo_assert(void); /** * Shorthands for constructing method tables. */ #define DEVMETHOD KOBJMETHOD #define DEVMETHOD_END KOBJMETHOD_END /* * Some common device interfaces. */ #include "device_if.h" #include "bus_if.h" struct module; int driver_module_handler(struct module *, int, void *); /** * Module support for automatically adding drivers to buses. 
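 *
 * A sketch (driver name hypothetical): an interrupt controller that
 * must attach in an early pass could register itself with
 *
 *	EARLY_DRIVER_MODULE(fooic, simplebus, fooic_driver, 0, 0,
 *	    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
 *
 * while an ordinary driver would use the plain DRIVER_MODULE() form,
 * which implies BUS_PASS_DEFAULT.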
*/ struct driver_module_data { int (*dmd_chainevh)(struct module *, int, void *); void *dmd_chainarg; const char *dmd_busname; kobj_class_t dmd_driver; devclass_t *dmd_devclass; int dmd_pass; }; #define EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, \ order, pass) \ \ static struct driver_module_data name##_##busname##_driver_mod = { \ evh, arg, \ #busname, \ (kobj_class_t) &driver, \ NULL, \ pass \ }; \ \ static moduledata_t name##_##busname##_mod = { \ #busname "/" #name, \ driver_module_handler, \ &name##_##busname##_driver_mod \ }; \ DECLARE_MODULE(name##_##busname, name##_##busname##_mod, \ SI_SUB_DRIVERS, order) #define EARLY_DRIVER_MODULE(name, busname, driver, evh, arg, pass) \ EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, \ SI_ORDER_MIDDLE, pass) #define DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, order) \ EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, \ order, BUS_PASS_DEFAULT) #define DRIVER_MODULE(name, busname, driver, evh, arg) \ EARLY_DRIVER_MODULE(name, busname, driver, evh, arg, \ BUS_PASS_DEFAULT) /** * Generic ivar accessor generation macros for bus drivers */ #define __BUS_ACCESSOR(varp, var, ivarp, ivar, type) \ \ static __inline type varp ## _get_ ## var(device_t dev) \ { \ uintptr_t v; \ int e __diagused; \ e = BUS_READ_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, &v); \ KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d", \ __func__, device_get_nameunit(dev), \ device_get_nameunit(device_get_parent(dev)), e)); \ return ((type) v); \ } \ \ static __inline void varp ## _set_ ## var(device_t dev, type t) \ { \ uintptr_t v = (uintptr_t) t; \ int e __diagused; \ e = BUS_WRITE_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, v); \ KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d", \ __func__, device_get_nameunit(dev), \ device_get_nameunit(device_get_parent(dev)), e)); \ } struct device_location_cache; typedef struct device_location_cache device_location_cache_t; device_location_cache_t *dev_wired_cache_init(void); void dev_wired_cache_fini(device_location_cache_t *dcp); bool dev_wired_cache_match(device_location_cache_t *dcp, device_t dev, const char *at); /** * Shorthand macros, taking resource argument * Generated with sys/tools/bus_macro.sh */ #define bus_barrier(r, o, l, f) \ bus_space_barrier((r)->r_bustag, (r)->r_bushandle, (o), (l), (f)) #define bus_poke_1(r, o, v) \ bus_space_poke_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_1(r, o, vp) \ bus_space_peek_1((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_1(r, o) \ bus_space_read_1((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_1(r, o, d, c) \ bus_space_read_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_1(r, o, d, c) \ bus_space_read_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_1(r, o, v, c) \ bus_space_set_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_1(r, o, v, c) \ bus_space_set_region_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_1(r, o, v) \ bus_space_write_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_1(r, o, d, c) \ bus_space_write_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_1(r, o, d, c) \ bus_space_write_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_1(r, o) \ bus_space_read_stream_1((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_1(r, 
o, d, c) \ bus_space_read_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_1(r, o, d, c) \ bus_space_read_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_1(r, o, v, c) \ bus_space_set_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_1(r, o, v, c) \ bus_space_set_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_1(r, o, v) \ bus_space_write_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_1(r, o, d, c) \ bus_space_write_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_1(r, o, d, c) \ bus_space_write_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_2(r, o, v) \ bus_space_poke_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_2(r, o, vp) \ bus_space_peek_2((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_2(r, o) \ bus_space_read_2((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_2(r, o, d, c) \ bus_space_read_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_2(r, o, d, c) \ bus_space_read_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_2(r, o, v, c) \ bus_space_set_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_2(r, o, v, c) \ bus_space_set_region_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_2(r, o, v) \ bus_space_write_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_2(r, o, d, c) \ bus_space_write_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_2(r, o, d, c) \ bus_space_write_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_2(r, o) \ bus_space_read_stream_2((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_2(r, o, d, c) \ bus_space_read_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_2(r, o, d, c) \ bus_space_read_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_2(r, o, v, c) \ bus_space_set_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_2(r, o, v, c) \ bus_space_set_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_2(r, o, v) \ bus_space_write_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_2(r, o, d, c) \ bus_space_write_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_2(r, o, d, c) \ bus_space_write_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_4(r, o, v) \ bus_space_poke_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_4(r, o, vp) \ bus_space_peek_4((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_4(r, o) \ bus_space_read_4((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_4(r, o, d, c) \ bus_space_read_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_4(r, o, d, c) \ bus_space_read_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_4(r, o, v, c) \ bus_space_set_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_4(r, o, v, c) \ bus_space_set_region_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_4(r, o, v) \ 
bus_space_write_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_4(r, o, d, c) \ bus_space_write_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_4(r, o, d, c) \ bus_space_write_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_4(r, o) \ bus_space_read_stream_4((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_4(r, o, d, c) \ bus_space_read_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_4(r, o, d, c) \ bus_space_read_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_4(r, o, v, c) \ bus_space_set_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_4(r, o, v, c) \ bus_space_set_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_4(r, o, v) \ bus_space_write_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_4(r, o, d, c) \ bus_space_write_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_4(r, o, d, c) \ bus_space_write_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_8(r, o, v) \ bus_space_poke_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_8(r, o, vp) \ bus_space_peek_8((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_8(r, o) \ bus_space_read_8((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_8(r, o, d, c) \ bus_space_read_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_8(r, o, d, c) \ bus_space_read_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_8(r, o, v, c) \ bus_space_set_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_8(r, o, v, c) \ bus_space_set_region_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_8(r, o, v) \ bus_space_write_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_8(r, o, d, c) \ bus_space_write_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_8(r, o, d, c) \ bus_space_write_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_8(r, o) \ bus_space_read_stream_8((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_8(r, o, d, c) \ bus_space_read_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_8(r, o, d, c) \ bus_space_read_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_8(r, o, v, c) \ bus_space_set_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_8(r, o, v, c) \ bus_space_set_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_8(r, o, v) \ bus_space_write_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_8(r, o, d, c) \ bus_space_write_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_8(r, o, d, c) \ bus_space_write_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #endif /* _KERNEL */ #endif /* !_SYS_BUS_H_ */ diff --git a/sys/x86/include/legacyvar.h b/sys/x86/include/legacyvar.h index 5627e9b90306..789c6c55f198 100644 --- a/sys/x86/include/legacyvar.h +++ b/sys/x86/include/legacyvar.h @@ -1,75 +1,75 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Peter Wemm * All rights 
reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _X86_LEGACYVAR_H_ #define _X86_LEGACYVAR_H_ enum legacy_device_ivars { LEGACY_IVAR_PCIDOMAIN, LEGACY_IVAR_PCIBUS, LEGACY_IVAR_PCISLOT, LEGACY_IVAR_PCIFUNC }; #define LEGACY_ACCESSOR(var, ivar, type) \ __BUS_ACCESSOR(legacy, var, LEGACY, ivar, type) LEGACY_ACCESSOR(pcidomain, PCIDOMAIN, uint32_t) LEGACY_ACCESSOR(pcibus, PCIBUS, uint32_t) LEGACY_ACCESSOR(pcislot, PCISLOT, int) LEGACY_ACCESSOR(pcifunc, PCIFUNC, int) #undef LEGACY_ACCESSOR int legacy_pcib_maxslots(device_t dev); uint32_t legacy_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); int legacy_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); void legacy_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes); int legacy_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); struct resource *legacy_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -int legacy_pcib_adjust_resource(device_t dev, device_t child, int type, +int legacy_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); int legacy_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int legacy_pcib_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int legacy_pcib_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int legacy_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); int legacy_pcib_alloc_msix(device_t pcib, device_t dev, int *irq); int legacy_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); #endif /* !_X86_LEGACYVAR_H_ */ diff --git a/sys/x86/pci/pci_bus.c b/sys/x86/pci/pci_bus.c index cfe0a3974187..c7715c47d3c8 100644 --- a/sys/x86/pci/pci_bus.c +++ b/sys/x86/pci/pci_bus.c @@ -1,776 +1,776 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, Stefan Esser * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CPU_ELAN #include #endif #include #include #include #include "pcib_if.h" int legacy_pcib_maxslots(device_t dev) { return 31; } /* read configuration space register */ uint32_t legacy_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { return(pci_cfgregread(0, bus, slot, func, reg, bytes)); } /* write configuration space register */ void legacy_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { pci_cfgregwrite(0, bus, slot, func, reg, data, bytes); } /* route interrupt */ static int legacy_pcib_route_interrupt(device_t pcib, device_t dev, int pin) { #ifdef __HAVE_PIR return (pci_pir_route_interrupt(pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pin)); #else /* No routing possible */ return (PCI_INVALID_IRQ); #endif } /* Pass MSI requests up to the nexus. */ int legacy_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } int legacy_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } int legacy_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus, hostb; int error, func, slot; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); slot = legacy_get_pcislot(pcib); func = legacy_get_pcifunc(pcib); if (slot == -1 || func == -1) return (0); hostb = pci_find_bsf(0, slot, func); KASSERT(hostb != NULL, ("%s: missing hostb for 0:%d:%d", __func__, slot, func)); pci_ht_map_msi(hostb, *addr); return (0); } static const char * legacy_pcib_is_host_bridge(int bus, int slot, int func, uint32_t id, uint8_t class, uint8_t subclass, uint8_t *busnum) { #ifdef __i386__ const char *s = NULL; static uint8_t pxb[4]; /* hack for 450nx */ *busnum = 0; switch (id) { case 0x12258086: s = "Intel 824?? 
host to PCI bridge"; /* XXX This is a guess */ /* *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x41, 1); */ *busnum = bus; break; case 0x71208086: s = "Intel 82810 (i810 GMCH) Host To Hub bridge"; break; case 0x71228086: s = "Intel 82810-DC100 (i810-DC100 GMCH) Host To Hub bridge"; break; case 0x71248086: s = "Intel 82810E (i810E GMCH) Host To Hub bridge"; break; case 0x11308086: s = "Intel 82815 (i815 GMCH) Host To Hub bridge"; break; case 0x71808086: s = "Intel 82443LX (440 LX) host to PCI bridge"; break; case 0x71908086: s = "Intel 82443BX (440 BX) host to PCI bridge"; break; case 0x71928086: s = "Intel 82443BX host to PCI bridge (AGP disabled)"; break; case 0x71948086: s = "Intel 82443MX host to PCI bridge"; break; case 0x71a08086: s = "Intel 82443GX host to PCI bridge"; break; case 0x71a18086: s = "Intel 82443GX host to AGP bridge"; break; case 0x71a28086: s = "Intel 82443GX host to PCI bridge (AGP disabled)"; break; case 0x84c48086: s = "Intel 82454KX/GX (Orion) host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x4a, 1); break; case 0x84ca8086: /* * For the 450nx chipset, there is a whole bundle of * things pretending to be host bridges. The MIOC will * be seen first and isn't really a pci bridge (the * actual buses are attached to the PXB's). We need to * read the registers of the MIOC to figure out the * bus numbers for the PXB channels. * * Since the MIOC doesn't have a pci bus attached, we * pretend it wasn't there. */ pxb[0] = legacy_pcib_read_config(0, bus, slot, func, 0xd0, 1); /* BUSNO[0] */ pxb[1] = legacy_pcib_read_config(0, bus, slot, func, 0xd1, 1) + 1; /* SUBA[0]+1 */ pxb[2] = legacy_pcib_read_config(0, bus, slot, func, 0xd3, 1); /* BUSNO[1] */ pxb[3] = legacy_pcib_read_config(0, bus, slot, func, 0xd4, 1) + 1; /* SUBA[1]+1 */ return NULL; case 0x84cb8086: switch (slot) { case 0x12: s = "Intel 82454NX PXB#0, Bus#A"; *busnum = pxb[0]; break; case 0x13: s = "Intel 82454NX PXB#0, Bus#B"; *busnum = pxb[1]; break; case 0x14: s = "Intel 82454NX PXB#1, Bus#A"; *busnum = pxb[2]; break; case 0x15: s = "Intel 82454NX PXB#1, Bus#B"; *busnum = pxb[3]; break; } break; case 0x1A308086: s = "Intel 82845 Host to PCI bridge"; break; /* AMD -- vendor 0x1022 */ case 0x30001022: s = "AMD Elan SC520 host to PCI bridge"; #ifdef CPU_ELAN init_AMD_Elan_sc520(); #else printf( "*** WARNING: missing CPU_ELAN -- timekeeping may be wrong\n"); #endif break; case 0x70061022: s = "AMD-751 host to PCI bridge"; break; case 0x700e1022: s = "AMD-761 host to PCI bridge"; break; /* SiS -- vendor 0x1039 */ case 0x04961039: s = "SiS 85c496"; break; case 0x04061039: s = "SiS 85c501"; break; case 0x06011039: s = "SiS 85c601"; break; case 0x55911039: s = "SiS 5591 host to PCI bridge"; break; case 0x00011039: s = "SiS 5591 host to AGP bridge"; break; /* VLSI -- vendor 0x1004 */ case 0x00051004: s = "VLSI 82C592 Host to PCI bridge"; break; /* XXX Here is MVP3, I got the datasheet but NO M/B to test it */ /* totally. Please let me know if anything wrong. -F */ /* XXX need info on the MVP3 -- any takers? */ case 0x05981106: s = "VIA 82C598MVP (Apollo MVP3) host bridge"; break; /* AcerLabs -- vendor 0x10b9 */ /* Funny : The datasheet told me vendor id is "10b8",sub-vendor */ /* id is '10b9" but the register always shows "10b9". 
-Foxfair */ case 0x154110b9: s = "AcerLabs M1541 (Aladdin-V) PCI host bridge"; break; /* OPTi -- vendor 0x1045 */ case 0xc7011045: s = "OPTi 82C700 host to PCI bridge"; break; case 0xc8221045: s = "OPTi 82C822 host to PCI Bridge"; break; /* ServerWorks -- vendor 0x1166 */ case 0x00051166: s = "ServerWorks NB6536 2.0HE host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00061166: /* FALLTHROUGH */ case 0x00081166: /* FALLTHROUGH */ case 0x02011166: /* FALLTHROUGH */ case 0x010f1014: /* IBM re-badged ServerWorks chipset */ s = "ServerWorks host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00091166: s = "ServerWorks NB6635 3.0LE host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00101166: s = "ServerWorks CIOB30 host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00111166: /* FALLTHROUGH */ case 0x03021014: /* IBM re-badged ServerWorks chipset */ s = "ServerWorks CMIC-HE host to PCI-X bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; /* XXX unknown chipset, but working */ case 0x00171166: /* FALLTHROUGH */ case 0x01011166: case 0x01101166: case 0x02251166: s = "ServerWorks host to PCI bridge(unknown chipset)"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; /* Compaq/HP -- vendor 0x0e11 */ case 0x60100e11: s = "Compaq/HP Model 6010 HotPlug PCI Bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0xc8, 1); break; /* Integrated Micro Solutions -- vendor 0x10e0 */ case 0x884910e0: s = "Integrated Micro Solutions VL Bridge"; break; default: if (class == PCIC_BRIDGE && subclass == PCIS_BRIDGE_HOST) s = "Host to PCI bridge"; break; } return s; #else const char *s = NULL; *busnum = 0; if (class == PCIC_BRIDGE && subclass == PCIS_BRIDGE_HOST) s = "Host to PCI bridge"; return s; #endif } /* * Scan the first pci bus for host-pci bridges and add pcib instances * to the nexus for each bridge. */ static void legacy_pcib_identify(driver_t *driver, device_t parent) { int bus, slot, func; uint8_t hdrtype; int found = 0; int pcifunchigh; int found824xx = 0; int found_orion = 0; device_t child; devclass_t pci_devclass; if (pci_cfgregopen() == 0) return; /* * Check to see if we haven't already had a PCI bus added * via some other means. If we have, bail since otherwise * we're going to end up duplicating it. */ if ((pci_devclass = devclass_find("pci")) && devclass_get_device(pci_devclass, 0)) return; bus = 0; retry: for (slot = 0; slot <= PCI_SLOTMAX; slot++) { func = 0; hdrtype = legacy_pcib_read_config(0, bus, slot, func, PCIR_HDRTYPE, 1); /* * When enumerating bus devices, the standard says that * one should check the header type and ignore the slots whose * header types that the software doesn't know about. We use * this to filter out devices. */ if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if ((hdrtype & PCIM_MFDEV) && (!found_orion || hdrtype != 0xff)) pcifunchigh = PCI_FUNCMAX; else pcifunchigh = 0; for (func = 0; func <= pcifunchigh; func++) { /* * Read the IDs and class from the device. 
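 * (A PCIR_DEVVENDOR read that returns all-ones, compared below as -1,
 * means no device responded at this bus/slot/function, so such
 * entries are skipped.)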
*/ uint32_t id; uint8_t class, subclass, busnum; const char *s; device_t *devs; int ndevs, i; id = legacy_pcib_read_config(0, bus, slot, func, PCIR_DEVVENDOR, 4); if (id == -1) continue; class = legacy_pcib_read_config(0, bus, slot, func, PCIR_CLASS, 1); subclass = legacy_pcib_read_config(0, bus, slot, func, PCIR_SUBCLASS, 1); s = legacy_pcib_is_host_bridge(bus, slot, func, id, class, subclass, &busnum); if (s == NULL) continue; /* * Check to see if the physical bus has already * been seen. Eg: hybrid 32 and 64 bit host * bridges to the same logical bus. */ if (device_get_children(parent, &devs, &ndevs) == 0) { for (i = 0; s != NULL && i < ndevs; i++) { if (strcmp(device_get_name(devs[i]), "pcib") != 0) continue; if (legacy_get_pcibus(devs[i]) == busnum) s = NULL; } free(devs, M_TEMP); } if (s == NULL) continue; /* * Add at priority 100 to make sure we * go after any motherboard resources */ child = BUS_ADD_CHILD(parent, 100, "pcib", busnum); device_set_desc(child, s); legacy_set_pcibus(child, busnum); legacy_set_pcislot(child, slot); legacy_set_pcifunc(child, func); found = 1; if (id == 0x12258086) found824xx = 1; if (id == 0x84c48086) found_orion = 1; } } if (found824xx && bus == 0) { bus++; goto retry; } /* * Make sure we add at least one bridge since some old * hardware doesn't actually have a host-pci bridge device. * Note that pci_cfgregopen() thinks we have PCI devices.. */ if (!found) { #ifndef NO_LEGACY_PCIB if (bootverbose) printf( "legacy_pcib_identify: no bridge found, adding pcib0 anyway\n"); child = BUS_ADD_CHILD(parent, 100, "pcib", 0); legacy_set_pcibus(child, 0); #endif } } static int legacy_pcib_probe(device_t dev) { if (pci_cfgregopen() == 0) return ENXIO; return -100; } static int legacy_pcib_attach(device_t dev) { #ifdef __HAVE_PIR device_t pir; int bus; bus = pcib_get_bus(dev); /* * Look for a PCI BIOS interrupt routing table as that will be * our method of routing interrupts if we have one. */ if (pci_pir_probe(bus, 0)) { pir = BUS_ADD_CHILD(device_get_parent(dev), 0, "pir", 0); if (pir != NULL) device_probe_and_attach(pir); } #endif device_add_child(dev, "pci", -1); return bus_generic_attach(dev); } int legacy_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return 0; case PCIB_IVAR_BUS: *result = legacy_get_pcibus(dev); return 0; } return ENOENT; } int legacy_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return EINVAL; case PCIB_IVAR_BUS: legacy_set_pcibus(dev, value); return 0; } return ENOENT; } /* * Helper routine for x86 Host-PCI bridge driver resource allocation. * This is used to adjust the start address of wildcard allocation * requests to avoid low addresses that are known to be problematic. * * If no memory preference is given, use upper 32MB slot most BIOSes * use for their memory window. This is typically only used on older * laptops that don't have PCI buses behind a PCI bridge, so assuming * > 32MB is likely OK. * * However, this can cause problems for other chipsets, so we make * this tunable by hw.pci.host_mem_start. 
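 *
 * Since the sysctl below is declared CTLFLAG_RDTUN, the default can be
 * overridden from loader.conf(5), e.g. (value hypothetical):
 *
 *	hw.pci.host_mem_start="0x90000000"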
SYSCTL_DECL(_hw_pci);

static unsigned long host_mem_start = 0x80000000;
SYSCTL_ULONG(_hw_pci, OID_AUTO, host_mem_start, CTLFLAG_RDTUN,
    &host_mem_start, 0,
    "Limit the host bridge memory to being above this address.");

rman_res_t
hostb_alloc_start(int type, rman_res_t start, rman_res_t end,
    rman_res_t count)
{

	if (start + count - 1 != end) {
		if (type == SYS_RES_MEMORY && start < host_mem_start)
			start = host_mem_start;
		if (type == SYS_RES_IOPORT && start < 0x1000)
			start = 0x1000;
	}
	return (start);
}

struct resource *
legacy_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS)
		return (pci_domain_alloc_bus(0, child, rid, start, end, count,
		    flags));
#endif
	start = hostb_alloc_start(type, start, end, count);
	return (bus_generic_alloc_resource(dev, child, type, rid, start, end,
	    count, flags));
}

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
int
-legacy_pcib_adjust_resource(device_t dev, device_t child, int type,
+legacy_pcib_adjust_resource(device_t dev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{

-	if (type == PCI_RES_BUS)
+	if (rman_get_type(r) == PCI_RES_BUS)
		return (pci_domain_adjust_bus(0, child, r, start, end));
-	return (bus_generic_adjust_resource(dev, child, type, r, start, end));
+	return (bus_generic_adjust_resource(dev, child, r, start, end));
}
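/*
 * rman_get_type(r) returns the type that was recorded when 'r' was
 * allocated, so the bridge no longer depends on the caller passing a
 * separate 'type' argument that matches the resource.
 */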
int
legacy_pcib_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{

	if (type == PCI_RES_BUS)
		return (pci_domain_release_bus(0, child, rid, r));
	return (bus_generic_release_resource(dev, child, type, rid, r));
}

int
legacy_pcib_activate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{

	if (type == PCI_RES_BUS)
		return (pci_domain_activate_bus(0, child, rid, r));
	return (bus_generic_activate_resource(dev, child, type, rid, r));
}

int
legacy_pcib_deactivate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{

	if (type == PCI_RES_BUS)
		return (pci_domain_deactivate_bus(0, child, rid, r));
	return (bus_generic_deactivate_resource(dev, child, type, rid, r));
}
#endif

static device_method_t legacy_pcib_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	legacy_pcib_identify),
	DEVMETHOD(device_probe,		legacy_pcib_probe),
	DEVMETHOD(device_attach,	legacy_pcib_attach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	legacy_pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,	legacy_pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,	legacy_pcib_alloc_resource),
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	DEVMETHOD(bus_adjust_resource,	legacy_pcib_adjust_resource),
	DEVMETHOD(bus_release_resource,	legacy_pcib_release_resource),
	DEVMETHOD(bus_activate_resource, legacy_pcib_activate_resource),
	DEVMETHOD(bus_deactivate_resource, legacy_pcib_deactivate_resource),
#else
	DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
#endif
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	legacy_pcib_maxslots),
	DEVMETHOD(pcib_read_config,	legacy_pcib_read_config),
	DEVMETHOD(pcib_write_config,	legacy_pcib_write_config),
	DEVMETHOD(pcib_route_interrupt,	legacy_pcib_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,	legacy_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	pcib_release_msi),
	DEVMETHOD(pcib_alloc_msix,	legacy_pcib_alloc_msix),
	DEVMETHOD(pcib_release_msix,	pcib_release_msix),
	DEVMETHOD(pcib_map_msi,		legacy_pcib_map_msi),
	DEVMETHOD(pcib_request_feature,	pcib_request_feature_allow),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, legacy_pcib_driver, legacy_pcib_methods, 1);
DRIVER_MODULE(pcib, legacy, legacy_pcib_driver, 0, 0);

/*
 * Install a placeholder to claim the resources owned by the
 * PCI bus interface.  This could be used to extract the
 * config space registers in the extreme case where the PnP
 * ID is available and the PCI BIOS isn't, but for now we just
 * eat the PnP ID and do nothing else.
 *
 * We silence this probe, as it will generally confuse people.
 */
static struct isa_pnp_id pcibus_pnp_ids[] = {
	{ 0x030ad041 /* PNP0A03 */, "PCI Bus" },
	{ 0x080ad041 /* PNP0A08 */, "PCIe Bus" },
	{ 0 }
};

static int
pcibus_pnp_probe(device_t dev)
{
	int result;

	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev,
	    pcibus_pnp_ids)) <= 0)
		device_quiet(dev);
	return (result);
}

static int
pcibus_pnp_attach(device_t dev)
{
	return (0);
}

static device_method_t pcibus_pnp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pcibus_pnp_probe),
	DEVMETHOD(device_attach,	pcibus_pnp_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	{ 0, 0 }
};

DEFINE_CLASS_0(pcibus_pnp, pcibus_pnp_driver, pcibus_pnp_methods, 1);
DRIVER_MODULE(pcibus_pnp, isa, pcibus_pnp_driver, 0, 0);

#ifdef __HAVE_PIR
/*
 * Provide a PCI-PCI bridge driver for PCI buses behind PCI-PCI bridges
 * that appear in the PCIBIOS Interrupt Routing Table to use the routing
 * table for interrupt routing when possible.
 */
static int	pcibios_pcib_probe(device_t bus);

static device_method_t pcibios_pcib_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pcibios_pcib_probe),

	/* pcib interface */
	DEVMETHOD(pcib_route_interrupt,	legacy_pcib_route_interrupt),

	{0, 0}
};

DEFINE_CLASS_1(pcib, pcibios_pcib_driver, pcibios_pcib_pci_methods,
    sizeof(struct pcib_softc), pcib_driver);
DRIVER_MODULE(pcibios_pcib, pci, pcibios_pcib_driver, 0, 0);
ISA_PNP_INFO(pcibus_pnp_ids);

static int
pcibios_pcib_probe(device_t dev)
{
	int bus;

	if ((pci_get_class(dev) != PCIC_BRIDGE) ||
	    (pci_get_subclass(dev) != PCIS_BRIDGE_PCI))
		return (ENXIO);
	bus = pci_read_config(dev, PCIR_SECBUS_1, 1);
	if (bus == 0)
		return (ENXIO);
	if (!pci_pir_probe(bus, 1))
		return (ENXIO);
	device_set_desc(dev, "PCIBIOS PCI-PCI bridge");
	return (-2000);
}
#endif
diff --git a/sys/x86/x86/mptable_pci.c b/sys/x86/x86/mptable_pci.c
index 5792b0cb7387..de659ca75163 100644
--- a/sys/x86/x86/mptable_pci.c
+++ b/sys/x86/x86/mptable_pci.c
@@ -1,235 +1,234 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003 John Baldwin
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Host to PCI and PCI to PCI bridge drivers that use the MP Table to route
 * interrupts from PCI devices to I/O APICs.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "pcib_if.h"

/* Host to PCI bridge driver. */

static int
mptable_hostb_probe(device_t dev)
{

	if (pci_cfgregopen() == 0)
		return (ENXIO);
	if (mptable_pci_probe_table(legacy_get_pcibus(dev)) != 0)
		return (ENXIO);
	device_set_desc(dev, "MPTable Host-PCI bridge");
	return (0);
}

static int
mptable_hostb_attach(device_t dev)
{

#ifdef NEW_PCIB
	mptable_pci_host_res_init(dev);
#endif
	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

#ifdef NEW_PCIB
static int
mptable_is_isa_range(rman_res_t start, rman_res_t end)
{

	if (end >= 0x10000)
		return (0);
	if ((start & 0xfc00) != (end & 0xfc00))
		return (0);
	start &= ~0xfc00;
	end &= ~0xfc00;
	return (start >= 0x100 && end <= 0x3ff);
}
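/*
 * The masking above implements classic ISA 10-bit I/O decoding: ports
 * 0x100-0x3ff alias at every 0x400-byte step through the 64KB I/O
 * space, so a range is treated as ISA if it stays within a single
 * 0x400-byte block and its offset in that block falls in 0x100-0x3ff.
 */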
static int
mptable_is_vga_range(rman_res_t start, rman_res_t end)
{

	if (end >= 0x10000)
		return (0);
	if ((start & 0xfc00) != (end & 0xfc00))
		return (0);
	start &= ~0xfc00;
	end &= ~0xfc00;
	return (pci_is_vga_ioport_range(start, end));
}

static struct resource *
mptable_hostb_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct mptable_hostb_softc *sc;

#ifdef PCI_RES_BUS
	if (type == PCI_RES_BUS)
		return (pci_domain_alloc_bus(0, child, rid, start, end, count,
		    flags));
#endif
	sc = device_get_softc(dev);
	if (type == SYS_RES_IOPORT && start + count - 1 == end) {
		if (mptable_is_isa_range(start, end)) {
			switch (sc->sc_decodes_isa_io) {
			case -1:
				return (NULL);
			case 1:
				return (bus_generic_alloc_resource(dev, child,
				    type, rid, start, end, count, flags));
			default:
				break;
			}
		}
		if (mptable_is_vga_range(start, end)) {
			switch (sc->sc_decodes_vga_io) {
			case -1:
				return (NULL);
			case 1:
				return (bus_generic_alloc_resource(dev, child,
				    type, rid, start, end, count, flags));
			default:
				break;
			}
		}
	}
	start = hostb_alloc_start(type, start, end, count);
	return (pcib_host_res_alloc(&sc->sc_host_res, child, type, rid, start,
	    end, count, flags));
}

static int
-mptable_hostb_adjust_resource(device_t dev, device_t child, int type,
+mptable_hostb_adjust_resource(device_t dev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{
	struct mptable_hostb_softc *sc;

#ifdef PCI_RES_BUS
-	if (type == PCI_RES_BUS)
+	if (rman_get_type(r) == PCI_RES_BUS)
		return (pci_domain_adjust_bus(0, child, r, start, end));
#endif
	sc = device_get_softc(dev);
-	return (pcib_host_res_adjust(&sc->sc_host_res, child, type, r, start,
-	    end));
+	return (pcib_host_res_adjust(&sc->sc_host_res, child, r, start, end));
}
#endif

static device_method_t mptable_hostb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mptable_hostb_probe),
	DEVMETHOD(device_attach,	mptable_hostb_attach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	legacy_pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,	legacy_pcib_write_ivar),
#ifdef NEW_PCIB
	DEVMETHOD(bus_alloc_resource,	mptable_hostb_alloc_resource),
	DEVMETHOD(bus_adjust_resource,	mptable_hostb_adjust_resource),
#else
	DEVMETHOD(bus_alloc_resource,	legacy_pcib_alloc_resource),
	DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
#endif
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	DEVMETHOD(bus_release_resource,	legacy_pcib_release_resource),
	DEVMETHOD(bus_activate_resource, legacy_pcib_activate_resource),
	DEVMETHOD(bus_deactivate_resource, legacy_pcib_deactivate_resource),
#else
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
#endif
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	legacy_pcib_maxslots),
	DEVMETHOD(pcib_read_config,	legacy_pcib_read_config),
	DEVMETHOD(pcib_write_config,	legacy_pcib_write_config),
	DEVMETHOD(pcib_route_interrupt,	mptable_pci_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,	legacy_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	pcib_release_msi),
	DEVMETHOD(pcib_alloc_msix,	legacy_pcib_alloc_msix),
	DEVMETHOD(pcib_release_msix,	pcib_release_msix),
	DEVMETHOD(pcib_map_msi,		legacy_pcib_map_msi),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, mptable_hostb_driver, mptable_hostb_methods,
    sizeof(struct mptable_hostb_softc));
DRIVER_MODULE(mptable_pcib, legacy, mptable_hostb_driver, 0, 0);

/* PCI to PCI bridge driver. */

static int
mptable_pcib_probe(device_t dev)
{
	int bus;

	if ((pci_get_class(dev) != PCIC_BRIDGE) ||
	    (pci_get_subclass(dev) != PCIS_BRIDGE_PCI))
		return (ENXIO);
	bus = pci_read_config(dev, PCIR_SECBUS_1, 1);
	if (bus == 0)
		return (ENXIO);
	if (mptable_pci_probe_table(bus) != 0)
		return (ENXIO);
	device_set_desc(dev, "MPTable PCI-PCI bridge");
	return (-1000);
}

static device_method_t mptable_pcib_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mptable_pcib_probe),

	/* pcib interface */
	DEVMETHOD(pcib_route_interrupt,	mptable_pci_route_interrupt),

	{0, 0}
};

DEFINE_CLASS_1(pcib, mptable_pcib_driver, mptable_pcib_pci_methods,
    sizeof(struct pcib_softc), pcib_driver);
DRIVER_MODULE(mptable_pcib, pci, mptable_pcib_driver, 0, 0);